From 5b5f477b54aa4c31986b749b13d945f6c006b24a Mon Sep 17 00:00:00 2001 From: Laura Trotta Date: Fri, 31 Jan 2025 10:04:01 +0100 Subject: [PATCH 01/96] bump version --- config/version.txt | 2 +- .../java/co/elastic/clients/transport/VersionInfo.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config/version.txt b/config/version.txt index f7ee06693..47da986f8 100644 --- a/config/version.txt +++ b/config/version.txt @@ -1 +1 @@ -9.0.0 +9.1.0 diff --git a/java-client/src/main-flavored/java/co/elastic/clients/transport/VersionInfo.java b/java-client/src/main-flavored/java/co/elastic/clients/transport/VersionInfo.java index 016915928..43105b579 100644 --- a/java-client/src/main-flavored/java/co/elastic/clients/transport/VersionInfo.java +++ b/java-client/src/main-flavored/java/co/elastic/clients/transport/VersionInfo.java @@ -21,5 +21,5 @@ // Package private class VersionInfo { - static final String VERSION = "9.0.0"; + static final String VERSION = "9.1.0"; } From 780bb8f9fc4f6c187c4a8d7bb900929925482dc5 Mon Sep 17 00:00:00 2001 From: Laura Trotta Date: Fri, 31 Jan 2025 10:16:32 +0100 Subject: [PATCH 02/96] pipeline update --- .buildkite/pipeline.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 8a9c2615d..4ec36a4e3 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -2,7 +2,7 @@ steps: - label: ":java: :elasticsearch: Elasticsearch Java API client - {{matrix.workflow}}" agents: provider: "gcp" - branches: [ "main", "7.17", "8.15", "8.x", "9.0" ] + branches: [ "main", "7.17", "8.17", "8.18", "8.x", "9.0" ] matrix: setup: workflow: From 4009a7662d9d07cccf673c1948d2616969d5f855 Mon Sep 17 00:00:00 2001 From: Laura Trotta Date: Fri, 31 Jan 2025 17:09:22 +0100 Subject: [PATCH 03/96] [codegen] update to latest spec --- .../ElasticsearchAsyncClient.java | 4602 +++++++++++++++-- .../elasticsearch/ElasticsearchClient.java | 4574 +++++++++++++++- 
.../_helpers/esql/EsqlHelper.java | 2 +- .../elasticsearch/_types/ErrorCause.java | 4 +- .../elasticsearch/_types/ExpandWildcard.java | 4 +- .../elasticsearch/_types/KnnQuery.java | 42 + .../elasticsearch/_types/KnnRetriever.java | 42 + .../elasticsearch/_types/KnnSearch.java | 42 + .../elasticsearch/_types/RescoreVector.java | 158 + .../clients/elasticsearch/_types/Retries.java | 16 +- .../elasticsearch/_types/ShardStatistics.java | 18 +- .../elasticsearch/_types/SortOptions.java | 2 +- .../elasticsearch/_types/StoredScript.java | 15 +- .../elasticsearch/_types/VersionType.java | 14 +- .../_types/WriteResponseBase.java | 60 +- .../_types/analysis/Normalizer.java | 2 +- .../mapping/CountedKeywordProperty.java | 157 + .../_types/mapping/FieldType.java | 2 + .../_types/mapping/GeoShapeProperty.java | 2 +- .../_types/mapping/Property.java | 32 + .../_types/mapping/PropertyBase.java | 28 + .../_types/mapping/PropertyBuilders.java | 19 + .../_types/mapping/ShapeProperty.java | 2 +- .../mapping/SyntheticSourceKeepEnum.java | 86 + .../_types/query_dsl/FieldAndFormat.java | 8 +- .../elasticsearch/_types/query_dsl/Like.java | 2 +- .../ElasticsearchAsyncSearchAsyncClient.java | 24 +- .../ElasticsearchAsyncSearchClient.java | 24 +- .../async_search/SubmitRequest.java | 42 + .../ElasticsearchAutoscalingAsyncClient.java | 18 +- .../ElasticsearchAutoscalingClient.java | 18 +- .../elasticsearch/cat/AliasesRequest.java | 45 +- .../elasticsearch/cat/AllocationRequest.java | 23 +- .../cat/ComponentTemplatesRequest.java | 26 +- .../elasticsearch/cat/CountRequest.java | 34 +- .../cat/ElasticsearchCatAsyncClient.java | 1080 ++-- .../cat/ElasticsearchCatClient.java | 1080 ++-- .../elasticsearch/cat/FielddataRequest.java | 11 +- .../elasticsearch/cat/HealthRequest.java | 25 +- .../elasticsearch/cat/HelpRequest.java | 4 +- .../elasticsearch/cat/IndicesRequest.java | 6 +- .../elasticsearch/cat/MasterRequest.java | 13 +- .../cat/MlDataFrameAnalyticsRequest.java | 12 +- 
.../elasticsearch/cat/MlDatafeedsRequest.java | 18 +- .../elasticsearch/cat/MlJobsRequest.java | 18 +- .../cat/MlTrainedModelsRequest.java | 11 +- .../elasticsearch/cat/NodeattrsRequest.java | 10 +- .../elasticsearch/cat/NodesRequest.java | 10 +- .../cat/PendingTasksRequest.java | 11 +- .../elasticsearch/cat/PluginsRequest.java | 10 +- .../elasticsearch/cat/RecoveryRequest.java | 19 +- .../cat/RepositoriesRequest.java | 11 +- .../elasticsearch/cat/SegmentsRequest.java | 12 +- .../elasticsearch/cat/ShardsRequest.java | 10 +- .../elasticsearch/cat/SnapshotsRequest.java | 13 +- .../elasticsearch/cat/TasksRequest.java | 10 +- .../elasticsearch/cat/TemplatesRequest.java | 13 +- .../elasticsearch/cat/ThreadPoolRequest.java | 12 +- .../elasticsearch/cat/TransformsRequest.java | 5 +- .../ComponentTemplate.java | 17 +- .../ccr/ElasticsearchCcrAsyncClient.java | 56 +- .../ccr/ElasticsearchCcrClient.java | 56 +- .../cluster/ComponentTemplateNode.java | 30 + .../ElasticsearchClusterAsyncClient.java | 154 +- .../cluster/ElasticsearchClusterClient.java | 154 +- .../elasticsearch/cluster/HealthRequest.java | 8 +- .../cluster/PutClusterSettingsRequest.java | 8 +- .../cluster/RemoteInfoRequest.java | 22 +- .../remote_info/ClusterRemoteProxyInfo.java | 106 +- .../remote_info/ClusterRemoteSniffInfo.java | 75 +- .../reroute/CommandAllocateReplicaAction.java | 2 +- .../elasticsearch/connector/Connector.java | 24 + .../connector/DeleteConnectorRequest.java | 30 + .../ElasticsearchConnectorAsyncClient.java | 124 +- .../ElasticsearchConnectorClient.java | 124 +- .../connector/GetConnectorRequest.java | 37 +- .../elasticsearch/connector/ListRequest.java | 33 + .../elasticsearch/core/BulkRequest.java | 290 +- .../elasticsearch/core/BulkResponse.java | 40 +- .../core/ClearScrollRequest.java | 11 +- .../core/ClearScrollResponse.java | 18 +- .../core/ClosePointInTimeRequest.java | 14 +- .../core/ClosePointInTimeResponse.java | 18 +- .../elasticsearch/core/CountRequest.java | 133 +- 
.../elasticsearch/core/CreateRequest.java | 258 +- .../core/DeleteByQueryRequest.java | 329 +- .../core/DeleteByQueryResponse.java | 94 + .../core/DeleteByQueryRethrottleRequest.java | 6 +- .../elasticsearch/core/DeleteRequest.java | 141 +- .../core/DeleteScriptRequest.java | 31 +- .../elasticsearch/core/ExistsRequest.java | 144 +- .../core/ExistsSourceRequest.java | 76 +- .../elasticsearch/core/ExplainRequest.java | 110 +- .../elasticsearch/core/FieldCapsRequest.java | 86 +- .../elasticsearch/core/FieldCapsResponse.java | 15 +- .../elasticsearch/core/GetRequest.java | 220 +- .../elasticsearch/core/GetScriptRequest.java | 19 +- .../elasticsearch/core/GetSourceRequest.java | 83 +- .../elasticsearch/core/IndexRequest.java | 328 +- .../elasticsearch/core/KnnSearchRequest.java | 119 +- .../elasticsearch/core/KnnSearchResponse.java | 36 +- .../elasticsearch/core/MgetRequest.java | 18 + .../elasticsearch/core/MgetResponse.java | 28 +- .../core/MsearchTemplateRequest.java | 38 +- .../core/MtermvectorsRequest.java | 70 +- .../core/OpenPointInTimeRequest.java | 106 +- .../elasticsearch/core/PutScriptRequest.java | 52 +- .../elasticsearch/core/RankEvalRequest.java | 6 +- .../elasticsearch/core/ReindexRequest.java | 400 +- .../elasticsearch/core/ReindexResponse.java | 96 + .../core/ReindexRethrottleRequest.java | 22 +- .../core/RenderSearchTemplateRequest.java | 67 +- .../core/ScriptsPainlessExecuteRequest.java | 52 +- .../elasticsearch/core/ScrollRequest.java | 10 +- .../elasticsearch/core/SearchMvtRequest.java | 612 ++- .../elasticsearch/core/SearchRequest.java | 600 ++- .../core/SearchShardsRequest.java | 44 +- .../core/SearchTemplateRequest.java | 54 +- .../elasticsearch/core/TermsEnumRequest.java | 94 +- .../elasticsearch/core/TermsEnumResponse.java | 12 +- .../core/TermvectorsRequest.java | 163 +- .../core/UpdateByQueryRequest.java | 340 +- .../core/UpdateByQueryResponse.java | 94 + .../core/UpdateByQueryRethrottleRequest.java | 6 +- 
.../elasticsearch/core/UpdateRequest.java | 148 +- .../core/bulk/BulkOperationBase.java | 8 +- .../core/bulk/BulkResponseItem.java | 54 +- .../elasticsearch/core/bulk/UpdateAction.java | 42 +- .../core/bulk/UpdateOperation.java | 10 +- .../core/bulk/WriteOperation.java | 38 +- .../elasticsearch/core/get/GetResult.java | 66 +- .../core/msearch_template/TemplateConfig.java | 16 +- ...ankEvalMetricDiscountedCumulativeGain.java | 2 +- .../RankEvalMetricExpectedReciprocalRank.java | 2 +- .../RankEvalMetricMeanReciprocalRank.java | 2 +- .../rank_eval/RankEvalMetricPrecision.java | 2 +- .../core/rank_eval/RankEvalMetricRecall.java | 2 +- .../core/reindex/Destination.java | 32 +- .../core/reindex/RemoteSource.java | 16 +- .../elasticsearch/core/reindex/Source.java | 82 +- .../PainlessContext.java | 127 + .../PainlessContextSetup.java | 30 +- .../elasticsearch/core/search/Context.java | 2 +- .../core/search/ResponseBody.java | 84 +- .../core/termvectors/Filter.java | 8 +- ...asticsearchDanglingIndicesAsyncClient.java | 10 +- .../ElasticsearchDanglingIndicesClient.java | 10 +- .../elasticsearch/doc-files/api-spec.html | 1589 +++--- .../ElasticsearchEnrichAsyncClient.java | 24 +- .../enrich/ElasticsearchEnrichClient.java | 24 +- .../eql/ElasticsearchEqlAsyncClient.java | 24 +- .../eql/ElasticsearchEqlClient.java | 24 +- .../elasticsearch/eql/EqlSearchRequest.java | 20 + .../esql/AsyncQueryStopRequest.java | 232 + .../esql/ElasticsearchEsqlAsyncClient.java | 45 +- .../esql/ElasticsearchEsqlClient.java | 46 +- .../esql/{query => }/EsqlFormat.java | 4 +- .../elasticsearch/esql/QueryRequest.java | 1 - .../ElasticsearchFeaturesAsyncClient.java | 12 +- .../features/ElasticsearchFeaturesClient.java | 12 +- .../fleet/ElasticsearchFleetAsyncClient.java | 16 +- .../fleet/ElasticsearchFleetClient.java | 16 +- .../graph/ElasticsearchGraphAsyncClient.java | 4 +- .../graph/ElasticsearchGraphClient.java | 4 +- .../ilm/ElasticsearchIlmAsyncClient.java | 54 +- 
.../ilm/ElasticsearchIlmClient.java | 54 +- .../ilm/GetIlmStatusRequest.java | 4 +- .../indices/AddBlockRequest.java | 103 +- .../indices/CancelMigrateReindexRequest.java | 202 + .../indices/CancelMigrateReindexResponse.java | 109 + .../indices/CreateDataStreamRequest.java | 5 +- .../indices/CreateFromRequest.java | 271 + ...eResponse.java => CreateFromResponse.java} | 56 +- .../indices/DataStreamsStatsRequest.java | 4 +- .../ElasticsearchIndicesAsyncClient.java | 694 ++- .../indices/ElasticsearchIndicesClient.java | 699 ++- .../indices/ExistsAliasRequest.java | 4 +- .../indices/ExistsIndexTemplateRequest.java | 4 +- .../indices/GetDataLifecycleRequest.java | 5 +- .../indices/GetDataStreamRequest.java | 4 +- .../GetMigrateReindexStatusRequest.java | 203 + .../GetMigrateReindexStatusResponse.java | 451 ++ .../elasticsearch/indices/IndexSettings.java | 2 +- .../indices/IndexSettingsLifecycle.java | 35 + .../indices/MappingLimitSettings.java | 39 +- .../MappingLimitSettingsSourceFields.java | 157 + .../indices/MigrateReindexRequest.java | 191 + .../indices/MigrateReindexResponse.java | 109 + .../indices/ResolveClusterRequest.java | 210 +- .../elasticsearch/indices/SourceMode.java | 68 + .../indices/UnfreezeRequest.java | 430 -- .../add_block/IndicesBlockOptions.java | 12 + .../indices/create_from/CreateFrom.java | 249 + .../StatusError.java | 178 + .../StatusInProgress.java | 203 + .../migrate_reindex/MigrateReindex.java | 196 + .../indices/migrate_reindex/ModeEnum.java | 64 + .../migrate_reindex/SourceIndex.java} | 79 +- .../inference/DeleteInferenceRequest.java | 16 +- .../ElasticsearchInferenceAsyncClient.java | 121 +- .../ElasticsearchInferenceClient.java | 122 +- .../inference/InferenceRequest.java | 73 +- .../inference/UpdateInferenceRequest.java | 298 ++ .../inference/UpdateInferenceResponse.java | 107 + .../ingest/DeleteGeoipDatabaseRequest.java | 23 +- .../ElasticsearchIngestAsyncClient.java | 121 +- .../ingest/ElasticsearchIngestClient.java | 121 +- 
.../ingest/GetGeoipDatabaseRequest.java | 11 +- .../ingest/GetPipelineRequest.java | 6 +- .../ingest/PutGeoipDatabaseRequest.java | 5 +- .../elasticsearch/ingest/SimulateRequest.java | 29 +- .../license/DeleteLicenseRequest.java | 23 +- .../ElasticsearchLicenseAsyncClient.java | 139 +- .../license/ElasticsearchLicenseClient.java | 139 +- .../license/GetLicenseRequest.java | 12 +- .../elasticsearch/license/PostRequest.java | 30 +- .../license/PostStartBasicRequest.java | 6 +- .../ElasticsearchLogstashAsyncClient.java | 14 +- .../logstash/ElasticsearchLogstashClient.java | 14 +- .../migration/DeprecationsResponse.java | 121 + .../ElasticsearchMigrationAsyncClient.java | 10 +- .../ElasticsearchMigrationClient.java | 10 +- ...earTrainedModelDeploymentCacheRequest.java | 12 +- .../elasticsearch/ml/CloseJobRequest.java | 30 +- .../ml/DeleteCalendarRequest.java | 5 +- .../ml/DeleteExpiredDataRequest.java | 18 +- .../elasticsearch/ml/DeleteFilterRequest.java | 7 +- .../ml/DeleteForecastRequest.java | 10 +- .../elasticsearch/ml/DeleteJobRequest.java | 14 +- .../ml/DeleteModelSnapshotRequest.java | 9 +- .../ml/DeleteTrainedModelAliasRequest.java | 9 +- .../ml/DeleteTrainedModelRequest.java | 6 +- .../ml/ElasticsearchMlAsyncClient.java | 748 +-- .../ml/ElasticsearchMlClient.java | 748 +-- .../ml/EstimateModelMemoryRequest.java | 8 +- .../ml/EvaluateDataFrameRequest.java | 11 +- .../ml/ExplainDataFrameAnalyticsRequest.java | 8 +- .../elasticsearch/ml/OpenJobRequest.java | 14 +- .../ml/PreviewDataFrameAnalyticsRequest.java | 4 +- .../elasticsearch/ml/PutJobRequest.java | 9 +- .../ml/UpgradeJobSnapshotRequest.java | 16 +- .../ElasticsearchMonitoringAsyncClient.java | 8 +- .../ElasticsearchMonitoringClient.java | 8 +- .../nodes/ElasticsearchNodesAsyncClient.java | 56 +- .../nodes/ElasticsearchNodesClient.java | 56 +- .../elasticsearch/nodes/NodesInfoRequest.java | 6 +- .../ElasticsearchQueryRulesAsyncClient.java | 34 +- .../ElasticsearchQueryRulesClient.java | 34 +- 
.../ElasticsearchRollupAsyncClient.java | 40 +- .../rollup/ElasticsearchRollupClient.java | 40 +- .../DeleteSearchApplicationRequest.java | 10 +- ...ticsearchSearchApplicationAsyncClient.java | 60 +- .../ElasticsearchSearchApplicationClient.java | 60 +- ...csearchSearchableSnapshotsAsyncClient.java | 22 +- ...lasticsearchSearchableSnapshotsClient.java | 22 +- .../security/ActivateUserProfileRequest.java | 54 +- .../ClearCachedPrivilegesRequest.java | 8 +- .../security/ClearCachedRealmsRequest.java | 24 +- .../security/ClearCachedRolesRequest.java | 12 +- .../ClearCachedServiceTokensRequest.java | 36 +- .../security/CreateApiKeyRequest.java | 117 +- .../security/CreateServiceTokenRequest.java | 37 +- .../security/DelegatePkiRequest.java | 17 +- .../security/DeletePrivilegesRequest.java | 22 +- .../security/DeleteRoleMappingRequest.java | 13 +- .../security/DeleteRoleMappingResponse.java | 10 +- .../security/DeleteRoleRequest.java | 8 +- .../security/DeleteRoleResponse.java | 10 +- .../security/DeleteServiceTokenRequest.java | 12 +- .../security/DeleteServiceTokenResponse.java | 12 +- .../security/DeleteUserRequest.java | 4 +- .../security/DeleteUserResponse.java | 12 +- .../security/DisableUserProfileRequest.java | 19 +- .../security/DisableUserRequest.java | 7 +- .../ElasticsearchSecurityAsyncClient.java | 1654 ++++-- .../security/ElasticsearchSecurityClient.java | 1658 ++++-- .../security/EnableUserProfileRequest.java | 22 +- .../security/EnableUserRequest.java | 7 +- .../security/EnrollKibanaRequest.java | 4 + .../security/EnrollKibanaResponse.java | 12 +- .../security/EnrollNodeRequest.java | 6 + .../security/EnrollNodeResponse.java | 73 +- .../GetBuiltinPrivilegesResponse.java | 45 +- .../security/GetPrivilegesRequest.java | 27 +- .../security/GetRoleRequest.java | 4 +- .../security/GetSecuritySettingsRequest.java | 184 + .../security/GetSecuritySettingsResponse.java | 245 + .../security/GetServiceAccountsRequest.java | 19 +- 
.../GetServiceCredentialsRequest.java | 21 +- .../GetServiceCredentialsResponse.java | 12 +- .../security/GetTokenRequest.java | 70 +- .../security/GetUserPrivilegesRequest.java | 5 + .../security/GetUserProfileRequest.java | 33 +- .../security/GetUserProfileResponse.java | 32 +- .../security/GetUserRequest.java | 6 +- .../security/GrantApiKeyRequest.java | 35 +- .../elasticsearch/security/GrantType.java | 4 +- .../security/HasPrivilegesRequest.java | 4 +- .../HasPrivilegesUserProfileRequest.java | 18 +- .../security/InvalidateApiKeyRequest.java | 32 +- .../security/InvalidateApiKeyResponse.java | 49 +- .../security/InvalidateTokenRequest.java | 34 + .../security/InvalidateTokenResponse.java | 40 +- .../security/OidcAuthenticateRequest.java | 8 +- .../security/OidcLogoutRequest.java | 7 +- .../OidcPrepareAuthenticationRequest.java | 7 +- .../security/PutPrivilegesRequest.java | 31 + .../security/PutRoleMappingRequest.java | 98 +- .../security/PutRoleRequest.java | 16 + .../security/PutRoleResponse.java | 15 +- .../security/PutUserRequest.java | 94 +- .../security/PutUserResponse.java | 12 +- .../security/QueryApiKeysRequest.java | 118 +- .../security/QueryRoleRequest.java | 77 +- .../security/QueryRoleResponse.java | 36 +- .../security/QueryUserRequest.java | 84 +- .../security/QueryUserResponse.java | 8 +- .../elasticsearch/security/Restriction.java | 18 +- .../security/RoleDescriptor.java | 14 +- .../security/RoleDescriptorRead.java | 18 +- .../security/SamlAuthenticateRequest.java | 33 +- .../security/SamlAuthenticateResponse.java | 40 +- .../security/SamlCompleteLogoutRequest.java | 18 +- .../security/SamlInvalidateRequest.java | 48 +- .../security/SamlInvalidateResponse.java | 28 +- .../security/SamlLogoutRequest.java | 14 +- .../security/SamlLogoutResponse.java | 12 +- .../SamlPrepareAuthenticationRequest.java | 36 +- .../SamlPrepareAuthenticationResponse.java | 28 +- .../SamlServiceProviderMetadataRequest.java | 5 + 
.../SamlServiceProviderMetadataResponse.java | 10 +- .../security/SecuritySettings.java | 166 + .../security/SuggestUserProfilesRequest.java | 56 +- .../security/SuggestUserProfilesResponse.java | 42 +- .../security/UpdateApiKeyRequest.java | 144 +- .../security/UpdateApiKeyResponse.java | 4 +- .../UpdateCrossClusterApiKeyRequest.java | 25 +- .../security/UpdateSettingsRequest.java | 390 ++ .../security/UpdateSettingsResponse.java | 157 + .../UpdateUserProfileDataRequest.java | 57 +- .../security/enroll_kibana/Token.java | 22 +- .../get_token/AccessTokenGrantType.java | 27 + .../security/grant_api_key/GrantApiKey.java | 30 +- .../ApplicationPrivilegesCheck.java | 18 +- .../has_privileges/IndexPrivilegesCheck.java | 28 +- .../security/suggest_user_profiles/Hint.java | 6 +- .../ElasticsearchShutdownAsyncClient.java | 14 +- .../shutdown/ElasticsearchShutdownClient.java | 14 +- .../ElasticsearchSimulateAsyncClient.java | 4 +- .../simulate/ElasticsearchSimulateClient.java | 4 +- .../slm/DeleteLifecycleRequest.java | 18 +- .../slm/ElasticsearchSlmAsyncClient.java | 48 +- .../slm/ElasticsearchSlmClient.java | 48 +- .../slm/ExecuteLifecycleRequest.java | 18 +- .../slm/ExecuteRetentionRequest.java | 18 +- .../slm/GetLifecycleRequest.java | 18 +- .../slm/GetSlmStatusRequest.java | 24 +- .../slm/PutLifecycleRequest.java | 24 +- .../elasticsearch/slm/SnapshotLifecycle.java | 18 +- .../elasticsearch/slm/StartSlmRequest.java | 24 +- .../elasticsearch/slm/StopSlmRequest.java | 24 +- .../ElasticsearchSnapshotAsyncClient.java | 397 +- .../snapshot/ElasticsearchSnapshotClient.java | 399 +- .../snapshot/RepositoryAnalyzeRequest.java | 742 +++ .../snapshot/RepositoryAnalyzeResponse.java | 804 +++ .../snapshot/S3RepositorySettings.java | 8 +- .../repository_analyze/BlobDetails.java | 363 ++ .../repository_analyze/DetailsInfo.java | 431 ++ .../repository_analyze/ReadBlobDetails.java | 484 ++ .../repository_analyze/ReadSummaryInfo.java | 502 ++ 
.../repository_analyze/SnapshotNodeInfo.java | 178 + .../repository_analyze/SummaryInfo.java | 209 + .../repository_analyze/WriteSummaryInfo.java | 355 ++ .../elasticsearch/sql/DeleteAsyncRequest.java | 13 +- .../sql/ElasticsearchSqlAsyncClient.java | 56 +- .../sql/ElasticsearchSqlClient.java | 56 +- .../elasticsearch/sql/GetAsyncRequest.java | 39 +- .../elasticsearch/sql/GetAsyncResponse.java | 34 +- .../sql/GetAsyncStatusRequest.java | 4 +- .../sql/GetAsyncStatusResponse.java | 82 +- .../elasticsearch/sql/QueryRequest.java | 186 +- .../elasticsearch/sql/QueryResponse.java | 204 +- .../elasticsearch/sql/TranslateRequest.java | 17 +- .../ssl/ElasticsearchSslAsyncClient.java | 2 +- .../ssl/ElasticsearchSslClient.java | 2 +- .../certificates/CertificateInformation.java | 66 +- .../synonyms/DeleteSynonymRequest.java | 25 +- .../synonyms/DeleteSynonymRuleRequest.java | 8 +- .../ElasticsearchSynonymsAsyncClient.java | 92 +- .../synonyms/ElasticsearchSynonymsClient.java | 92 +- .../synonyms/GetSynonymRequest.java | 12 +- .../synonyms/GetSynonymResponse.java | 24 +- .../synonyms/GetSynonymRuleRequest.java | 8 +- .../synonyms/GetSynonymsSetsRequest.java | 8 +- .../synonyms/GetSynonymsSetsResponse.java | 28 +- .../synonyms/PutSynonymRequest.java | 17 +- .../synonyms/PutSynonymRuleRequest.java | 23 +- .../elasticsearch/synonyms/SynonymRule.java | 16 +- .../synonyms/SynonymsUpdateResult.java | 10 +- .../elasticsearch/tasks/CancelRequest.java | 54 +- .../tasks/ElasticsearchTasksAsyncClient.java | 277 +- .../tasks/ElasticsearchTasksClient.java | 277 +- .../elasticsearch/tasks/GetTasksRequest.java | 22 +- .../clients/elasticsearch/tasks/GroupBy.java | 4 +- .../elasticsearch/tasks/ListRequest.java | 134 +- ...ElasticsearchTextStructureAsyncClient.java | 78 +- .../ElasticsearchTextStructureClient.java | 78 +- .../FindFieldStructureRequest.java | 41 +- .../FindMessageStructureRequest.java | 12 +- .../TestGrokPatternRequest.java | 22 +- .../transform/DeleteTransformRequest.java | 2 
+- .../ElasticsearchTransformAsyncClient.java | 162 +- .../ElasticsearchTransformClient.java | 162 +- .../transform/GetTransformRequest.java | 2 +- .../transform/GetTransformStatsRequest.java | 4 +- .../transform/ResetTransformRequest.java | 8 +- .../ScheduleNowTransformRequest.java | 12 +- .../transform/StartTransformRequest.java | 2 +- .../transform/UpgradeTransformsRequest.java | 16 +- .../watcher/AckWatchRequest.java | 18 +- .../watcher/ActivateWatchRequest.java | 4 +- .../watcher/DeactivateWatchRequest.java | 4 +- .../watcher/DeleteWatchRequest.java | 4 +- .../ElasticsearchWatcherAsyncClient.java | 219 +- .../watcher/ElasticsearchWatcherClient.java | 223 +- .../watcher/ExecuteWatchRequest.java | 34 +- .../watcher/ExecuteWatchResponse.java | 25 +- .../watcher/ExecutionThreadPool.java | 20 +- .../watcher/GetWatchRequest.java | 4 +- .../watcher/GetWatcherSettingsRequest.java | 179 + .../watcher/GetWatcherSettingsResponse.java | 163 + .../watcher/PutWatchRequest.java | 126 +- .../watcher/QueryWatchesRequest.java | 49 +- .../watcher/QueryWatchesResponse.java | 28 +- .../watcher/StopWatcherRequest.java | 12 +- .../watcher/UpdateSettingsRequest.java | 310 ++ .../watcher/UpdateSettingsResponse.java | 157 + .../watcher/WatcherStatsRequest.java | 3 +- .../watcher/stats/WatchRecordQueuedStats.java | 10 +- .../watcher/stats/WatchRecordStats.java | 24 +- .../watcher/stats/WatcherNodeStats.java | 92 +- .../xpack/ElasticsearchXpackAsyncClient.java | 12 +- .../xpack/ElasticsearchXpackClient.java | 12 +- .../xpack/XpackUsageRequest.java | 9 +- .../xpack/XpackUsageResponse.java | 32 - .../elasticsearch/xpack/info/Features.java | 31 - .../xpack/usage/IlmPolicyStatistics.java | 1 - .../elasticsearch/xpack/usage/Phase.java | 199 + .../elasticsearch/xpack/usage/Phases.java | 308 ++ .../_helpers/esql/EsqlAdapterTest.java | 2 +- 450 files changed, 38673 insertions(+), 9245 deletions(-) create mode 100644 
java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RescoreVector.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/CountedKeywordProperty.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/SyntheticSourceKeepEnum.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/core/scripts_painless_execute/PainlessContext.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/esql/AsyncQueryStopRequest.java rename java-client/src/main/java/co/elastic/clients/elasticsearch/esql/{query => }/EsqlFormat.java (94%) create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CancelMigrateReindexRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CancelMigrateReindexResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateFromRequest.java rename java-client/src/main/java/co/elastic/clients/elasticsearch/indices/{UnfreezeResponse.java => CreateFromResponse.java} (75%) create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMigrateReindexStatusRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMigrateReindexStatusResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MappingLimitSettingsSourceFields.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MigrateReindexRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MigrateReindexResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SourceMode.java delete mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/UnfreezeRequest.java create mode 100644 
java-client/src/main/java/co/elastic/clients/elasticsearch/indices/create_from/CreateFrom.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/get_migrate_reindex_status/StatusError.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/get_migrate_reindex_status/StatusInProgress.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/migrate_reindex/MigrateReindex.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/migrate_reindex/ModeEnum.java rename java-client/src/main/java/co/elastic/clients/elasticsearch/{xpack/usage/FrozenIndices.java => indices/migrate_reindex/SourceIndex.java} (58%) create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/UpdateInferenceRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/UpdateInferenceResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/security/GetSecuritySettingsRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/security/GetSecuritySettingsResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/security/SecuritySettings.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/security/UpdateSettingsRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/security/UpdateSettingsResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryAnalyzeRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryAnalyzeResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/repository_analyze/BlobDetails.java create mode 100644 
java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/repository_analyze/DetailsInfo.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/repository_analyze/ReadBlobDetails.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/repository_analyze/ReadSummaryInfo.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/repository_analyze/SnapshotNodeInfo.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/repository_analyze/SummaryInfo.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/repository_analyze/WriteSummaryInfo.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/GetWatcherSettingsRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/GetWatcherSettingsResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/UpdateSettingsRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/UpdateSettingsResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/usage/Phase.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/usage/Phases.java diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java index fa23034ac..7503861bf 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java @@ -357,12 +357,169 @@ public ElasticsearchXpackAsyncClient xpack() { // ----- Endpoint: bulk /** - * Bulk index or delete documents. 
Performs multiple indexing or delete - * operations in a single API call. This reduces overhead and can greatly - * increase indexing speed. + * Bulk index or delete documents. Perform multiple index, + * create, delete, and update actions in + * a single request. This reduces overhead and can greatly increase indexing + * speed. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To use the create action, you must have the + * create_doc, create, index, or + * write index privilege. Data streams support only the + * create action.
  • + *
  • To use the index action, you must have the + * create, index, or write index + * privilege.
  • + *
  • To use the delete action, you must have the + * delete or write index privilege.
  • + *
  • To use the update action, you must have the + * index or write index privilege.
  • + *
  • To automatically create a data stream or index with a bulk API request, + * you must have the auto_configure, create_index, or + * manage index privilege.
  • + *
  • To make the result of a bulk operation visible to search using the + * refresh parameter, you must have the maintenance or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * The actions are specified in the request body using a newline delimited JSON + * (NDJSON) structure: + * + *

+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * ....
+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * 
+	 * 
+ *

+ * The index and create actions expect a source on the + * next line and have the same semantics as the op_type parameter + * in the standard index API. A create action fails if a document + * with the same ID already exists in the target An index action + * adds or replaces a document as necessary. + *

+ * NOTE: Data streams support only the create action. To update or + * delete a document in a data stream, you must target the backing index + * containing the document. + *

+ * An update action expects that the partial doc, upsert, and + * script and its options are specified on the next line. + *

+ * A delete action does not expect a source on the next line and + * has the same semantics as the standard delete API. + *

+ * NOTE: The final line of data must end with a newline character + * (\n). Each newline character may be preceded by a carriage + * return (\r). When sending NDJSON data to the _bulk + * endpoint, use a Content-Type header of + * application/json or application/x-ndjson. Because + * this format uses literal newline characters (\n) as delimiters, + * make sure that the JSON actions and sources are not pretty printed. + *

+ * If you provide a target in the request path, it is used for any actions that + * don't explicitly specify an _index argument. + *

+ * A note on the format: the idea here is to make processing as fast as + * possible. As some of the actions are redirected to other shards on other + * nodes, only action_meta_data is parsed on the receiving node + * side. + *

+ * Client libraries using this protocol should try and strive to do something + * similar on the client side, and reduce buffering as much as possible. + *

+ * There is no "correct" number of actions to perform in a single bulk + * request. Experiment with different settings to find the optimal size for your + * particular workload. Note that Elasticsearch limits the maximum size of a + * HTTP request to 100mb by default so clients must ensure that no request + * exceeds this size. It is not possible to index a single document that exceeds + * the size limit, so you must pre-process any such documents into smaller + * pieces before sending them to Elasticsearch. For instance, split documents + * into pages or chapters before indexing them, or store raw binary data in a + * system outside Elasticsearch and replace the raw data with a link to the + * external system in the documents that you send to Elasticsearch. + *

+ * Client support for bulk requests + *

+ * Some of the officially supported clients provide helpers to assist with bulk + * requests and reindexing: + *

    + *
  • Go: Check out esutil.BulkIndexer
  • + *
  • Perl: Check out Search::Elasticsearch::Client::5_0::Bulk and + * Search::Elasticsearch::Client::5_0::Scroll
  • + *
  • Python: Check out elasticsearch.helpers.*
  • + *
  • JavaScript: Check out client.helpers.*
  • + *
  • .NET: Check out BulkAllObservable
  • + *
  • PHP: Check out bulk indexing.
  • + *
+ *

+ * Submitting bulk requests with cURL + *

+ * If you're providing text file input to curl, you must use the + * --data-binary flag instead of plain -d. The latter + * doesn't preserve newlines. For example: + * + *

+	 * $ cat requests
+	 * { "index" : { "_index" : "test", "_id" : "1" } }
+	 * { "field1" : "value1" }
+	 * $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
+	 * {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+	 * 
+	 * 
+ *

+ * Optimistic concurrency control + *

+ * Each index and delete action within a bulk API call + * may include the if_seq_no and if_primary_term + * parameters in their respective action and meta data lines. The + * if_seq_no and if_primary_term parameters control + * how operations are run, based on the last modification to existing documents. + * See Optimistic concurrency control for more details. + *

+ * Versioning + *

+ * Each bulk item can include the version value using the version + * field. It automatically follows the behavior of the index or delete operation + * based on the _version mapping. It also supports the + * version_type. + *

+ * Routing + *

+ * Each bulk item can include the routing value using the routing + * field. It automatically follows the behavior of the index or delete operation + * based on the _routing mapping. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Wait for active shards + *

+ * When making bulk calls, you can set the wait_for_active_shards + * parameter to require a minimum number of shard copies to be active before + * starting to process the bulk request. + *

+ * Refresh + *

+ * Control when the changes made by this request are visible to search. + *

+ * NOTE: Only the shards that receive the bulk request will be affected by + * refresh. Imagine a _bulk?refresh=wait_for request with three + * documents in it that happen to be routed to different shards in an index with + * five shards. The request will only wait for those three shards to refresh. + * The other two shards that make up the index do not participate in the + * _bulk request at all. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk">Documentation * on elastic.co */ @@ -374,15 +531,172 @@ public CompletableFuture bulk(BulkRequest request) { } /** - * Bulk index or delete documents. Performs multiple indexing or delete - * operations in a single API call. This reduces overhead and can greatly - * increase indexing speed. + * Bulk index or delete documents. Perform multiple index, + * create, delete, and update actions in + * a single request. This reduces overhead and can greatly increase indexing + * speed. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To use the create action, you must have the + * create_doc, create, index, or + * write index privilege. Data streams support only the + * create action.
  • + *
  • To use the index action, you must have the + * create, index, or write index + * privilege.
  • + *
  • To use the delete action, you must have the + * delete or write index privilege.
  • + *
  • To use the update action, you must have the + * index or write index privilege.
  • + *
  • To automatically create a data stream or index with a bulk API request, + * you must have the auto_configure, create_index, or + * manage index privilege.
  • + *
  • To make the result of a bulk operation visible to search using the + * refresh parameter, you must have the maintenance or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * The actions are specified in the request body using a newline delimited JSON + * (NDJSON) structure: + * + *

+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * ....
+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * 
+	 * 
+ *

+ * The index and create actions expect a source on the + * next line and have the same semantics as the op_type parameter + * in the standard index API. A create action fails if a document + * with the same ID already exists in the target. An index action + * adds or replaces a document as necessary. + *

+ * NOTE: Data streams support only the create action. To update or + * delete a document in a data stream, you must target the backing index + * containing the document. + *

+ * An update action expects that the partial doc, upsert, and + * script and its options are specified on the next line. + *

+ * A delete action does not expect a source on the next line and + * has the same semantics as the standard delete API. + *

+ * NOTE: The final line of data must end with a newline character + * (\n). Each newline character may be preceded by a carriage + * return (\r). When sending NDJSON data to the _bulk + * endpoint, use a Content-Type header of + * application/json or application/x-ndjson. Because + * this format uses literal newline characters (\n) as delimiters, + * make sure that the JSON actions and sources are not pretty printed. + *

+ * If you provide a target in the request path, it is used for any actions that + * don't explicitly specify an _index argument. + *

+ * A note on the format: the idea here is to make processing as fast as + * possible. As some of the actions are redirected to other shards on other + * nodes, only action_meta_data is parsed on the receiving node + * side. + *

+ * Client libraries using this protocol should try and strive to do something + * similar on the client side, and reduce buffering as much as possible. + *

+ * There is no "correct" number of actions to perform in a single bulk + * request. Experiment with different settings to find the optimal size for your + * particular workload. Note that Elasticsearch limits the maximum size of a + * HTTP request to 100mb by default so clients must ensure that no request + * exceeds this size. It is not possible to index a single document that exceeds + * the size limit, so you must pre-process any such documents into smaller + * pieces before sending them to Elasticsearch. For instance, split documents + * into pages or chapters before indexing them, or store raw binary data in a + * system outside Elasticsearch and replace the raw data with a link to the + * external system in the documents that you send to Elasticsearch. + *

+ * Client support for bulk requests + *

+ * Some of the officially supported clients provide helpers to assist with bulk + * requests and reindexing: + *

    + *
  • Go: Check out esutil.BulkIndexer
  • + *
  • Perl: Check out Search::Elasticsearch::Client::5_0::Bulk and + * Search::Elasticsearch::Client::5_0::Scroll
  • + *
  • Python: Check out elasticsearch.helpers.*
  • + *
  • JavaScript: Check out client.helpers.*
  • + *
  • .NET: Check out BulkAllObservable
  • + *
  • PHP: Check out bulk indexing.
  • + *
+ *

+ * Submitting bulk requests with cURL + *

+ * If you're providing text file input to curl, you must use the + * --data-binary flag instead of plain -d. The latter + * doesn't preserve newlines. For example: + * + *

+	 * $ cat requests
+	 * { "index" : { "_index" : "test", "_id" : "1" } }
+	 * { "field1" : "value1" }
+	 * $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
+	 * {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+	 * 
+	 * 
+ *

+ * Optimistic concurrency control + *

+ * Each index and delete action within a bulk API call + * may include the if_seq_no and if_primary_term + * parameters in their respective action and meta data lines. The + * if_seq_no and if_primary_term parameters control + * how operations are run, based on the last modification to existing documents. + * See Optimistic concurrency control for more details. + *

+ * Versioning + *

+ * Each bulk item can include the version value using the version + * field. It automatically follows the behavior of the index or delete operation + * based on the _version mapping. It also supports the + * version_type. + *

+ * Routing + *

+ * Each bulk item can include the routing value using the routing + * field. It automatically follows the behavior of the index or delete operation + * based on the _routing mapping. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Wait for active shards + *

+ * When making bulk calls, you can set the wait_for_active_shards + * parameter to require a minimum number of shard copies to be active before + * starting to process the bulk request. + *

+ * Refresh + *

+ * Control when the changes made by this request are visible to search. + *

+ * NOTE: Only the shards that receive the bulk request will be affected by + * refresh. Imagine a _bulk?refresh=wait_for request with three + * documents in it that happen to be routed to different shards in an index with + * five shards. The request will only wait for those three shards to refresh. + * The other two shards that make up the index do not participate in the + * _bulk request at all. * * @param fn * a function that initializes a builder to create the * {@link BulkRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk">Documentation * on elastic.co */ @@ -391,12 +705,169 @@ public final CompletableFuture bulk(Functionindex, + * create, delete, and update actions in + * a single request. This reduces overhead and can greatly increase indexing + * speed. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To use the create action, you must have the + * create_doc, create, index, or + * write index privilege. Data streams support only the + * create action.
  • + *
  • To use the index action, you must have the + * create, index, or write index + * privilege.
  • + *
  • To use the delete action, you must have the + * delete or write index privilege.
  • + *
  • To use the update action, you must have the + * index or write index privilege.
  • + *
  • To automatically create a data stream or index with a bulk API request, + * you must have the auto_configure, create_index, or + * manage index privilege.
  • + *
  • To make the result of a bulk operation visible to search using the + * refresh parameter, you must have the maintenance or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * The actions are specified in the request body using a newline delimited JSON + * (NDJSON) structure: + * + *

+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * ....
+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * 
+	 * 
+ *

+ * The index and create actions expect a source on the + * next line and have the same semantics as the op_type parameter + * in the standard index API. A create action fails if a document + * with the same ID already exists in the target. An index action + * adds or replaces a document as necessary. + *

+ * NOTE: Data streams support only the create action. To update or + * delete a document in a data stream, you must target the backing index + * containing the document. + *

+ * An update action expects that the partial doc, upsert, and + * script and its options are specified on the next line. + *

+ * A delete action does not expect a source on the next line and + * has the same semantics as the standard delete API. + *

+ * NOTE: The final line of data must end with a newline character + * (\n). Each newline character may be preceded by a carriage + * return (\r). When sending NDJSON data to the _bulk + * endpoint, use a Content-Type header of + * application/json or application/x-ndjson. Because + * this format uses literal newline characters (\n) as delimiters, + * make sure that the JSON actions and sources are not pretty printed. + *

+ * If you provide a target in the request path, it is used for any actions that + * don't explicitly specify an _index argument. + *

+ * A note on the format: the idea here is to make processing as fast as + * possible. As some of the actions are redirected to other shards on other + * nodes, only action_meta_data is parsed on the receiving node + * side. + *

+ * Client libraries using this protocol should try and strive to do something + * similar on the client side, and reduce buffering as much as possible. + *

+ * There is no "correct" number of actions to perform in a single bulk + * request. Experiment with different settings to find the optimal size for your + * particular workload. Note that Elasticsearch limits the maximum size of a + * HTTP request to 100mb by default so clients must ensure that no request + * exceeds this size. It is not possible to index a single document that exceeds + * the size limit, so you must pre-process any such documents into smaller + * pieces before sending them to Elasticsearch. For instance, split documents + * into pages or chapters before indexing them, or store raw binary data in a + * system outside Elasticsearch and replace the raw data with a link to the + * external system in the documents that you send to Elasticsearch. + *

+ * Client support for bulk requests + *

+ * Some of the officially supported clients provide helpers to assist with bulk + * requests and reindexing: + *

    + *
  • Go: Check out esutil.BulkIndexer
  • + *
  • Perl: Check out Search::Elasticsearch::Client::5_0::Bulk and + * Search::Elasticsearch::Client::5_0::Scroll
  • + *
  • Python: Check out elasticsearch.helpers.*
  • + *
  • JavaScript: Check out client.helpers.*
  • + *
  • .NET: Check out BulkAllObservable
  • + *
  • PHP: Check out bulk indexing.
  • + *
+ *

+ * Submitting bulk requests with cURL + *

+ * If you're providing text file input to curl, you must use the + * --data-binary flag instead of plain -d. The latter + * doesn't preserve newlines. For example: + * + *

+	 * $ cat requests
+	 * { "index" : { "_index" : "test", "_id" : "1" } }
+	 * { "field1" : "value1" }
+	 * $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
+	 * {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+	 * 
+	 * 
+ *

+ * Optimistic concurrency control + *

+ * Each index and delete action within a bulk API call + * may include the if_seq_no and if_primary_term + * parameters in their respective action and meta data lines. The + * if_seq_no and if_primary_term parameters control + * how operations are run, based on the last modification to existing documents. + * See Optimistic concurrency control for more details. + *

+ * Versioning + *

+ * Each bulk item can include the version value using the version + * field. It automatically follows the behavior of the index or delete operation + * based on the _version mapping. It also supports the + * version_type. + *

+ * Routing + *

+ * Each bulk item can include the routing value using the routing + * field. It automatically follows the behavior of the index or delete operation + * based on the _routing mapping. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Wait for active shards + *

+ * When making bulk calls, you can set the wait_for_active_shards + * parameter to require a minimum number of shard copies to be active before + * starting to process the bulk request. + *

+ * Refresh + *

+ * Control when the changes made by this request are visible to search. + *

+ * NOTE: Only the shards that receive the bulk request will be affected by + * refresh. Imagine a _bulk?refresh=wait_for request with three + * documents in it that happen to be routed to different shards in an index with + * five shards. The request will only wait for those three shards to refresh. + * The other two shards that make up the index do not participate in the + * _bulk request at all. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk">Documentation * on elastic.co */ @@ -408,12 +879,11 @@ public CompletableFuture bulk() { // ----- Endpoint: clear_scroll /** - * Clear a scrolling search. - *

- * Clear the search context and results for a scrolling search. + * Clear a scrolling search. Clear the search context and results for a + * scrolling search. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll">Documentation * on elastic.co */ @@ -425,15 +895,14 @@ public CompletableFuture clearScroll(ClearScrollRequest req } /** - * Clear a scrolling search. - *

- * Clear the search context and results for a scrolling search. + * Clear a scrolling search. Clear the search context and results for a + * scrolling search. * * @param fn * a function that initializes a builder to create the * {@link ClearScrollRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll">Documentation * on elastic.co */ @@ -443,12 +912,11 @@ public final CompletableFuture clearScroll( } /** - * Clear a scrolling search. - *

- * Clear the search context and results for a scrolling search. + * Clear a scrolling search. Clear the search context and results for a + * scrolling search. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll">Documentation * on elastic.co */ @@ -460,17 +928,15 @@ public CompletableFuture clearScroll() { // ----- Endpoint: close_point_in_time /** - * Close a point in time. - *

- * A point in time must be opened explicitly before being used in search - * requests. The keep_alive parameter tells Elasticsearch how long - * it should persist. A point in time is automatically closed when the - * keep_alive period has elapsed. However, keeping points in time - * has a cost; close them as soon as they are no longer required for search - * requests. + * Close a point in time. A point in time must be opened explicitly before being + * used in search requests. The keep_alive parameter tells + * Elasticsearch how long it should persist. A point in time is automatically + * closed when the keep_alive period has elapsed. However, keeping + * points in time has a cost; close them as soon as they are no longer required + * for search requests. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time">Documentation * on elastic.co */ @@ -482,20 +948,18 @@ public CompletableFuture closePointInTime(ClosePointIn } /** - * Close a point in time. - *

- * A point in time must be opened explicitly before being used in search - * requests. The keep_alive parameter tells Elasticsearch how long - * it should persist. A point in time is automatically closed when the - * keep_alive period has elapsed. However, keeping points in time - * has a cost; close them as soon as they are no longer required for search - * requests. + * Close a point in time. A point in time must be opened explicitly before being + * used in search requests. The keep_alive parameter tells + * Elasticsearch how long it should persist. A point in time is automatically + * closed when the keep_alive period has elapsed. However, keeping + * points in time has a cost; close them as soon as they are no longer required + * for search requests. * * @param fn * a function that initializes a builder to create the * {@link ClosePointInTimeRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time">Documentation * on elastic.co */ @@ -508,9 +972,21 @@ public final CompletableFuture closePointInTime( /** * Count search results. Get the number of documents matching a query. + *

+ * The query can be provided either by using a simple query string as a + * parameter, or by defining Query DSL within the request body. The query is + * optional. When no query is provided, the API uses match_all to + * count all the documents. + *

+ * The count API supports multi-target syntax. You can run a single count API + * search across multiple data streams and indices. + *

+ * The operation is broadcast across all shards. For each shard ID group, a + * replica is chosen and the search is run against it. This means that replicas + * increase the scalability of the count. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count">Documentation * on elastic.co */ @@ -523,12 +999,24 @@ public CompletableFuture count(CountRequest request) { /** * Count search results. Get the number of documents matching a query. + *

+ * The query can be provided either by using a simple query string as a + * parameter, or by defining Query DSL within the request body. The query is + * optional. When no query is provided, the API uses match_all to + * count all the documents. + *

+ * The count API supports multi-target syntax. You can run a single count API + * search across multiple data streams and indices. + *

+ * The operation is broadcast across all shards. For each shard ID group, a + * replica is chosen and the search is run against it. This means that replicas + * increase the scalability of the count. * * @param fn * a function that initializes a builder to create the * {@link CountRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count">Documentation * on elastic.co */ @@ -539,9 +1027,21 @@ public final CompletableFuture count( /** * Count search results. Get the number of documents matching a query. + *

+ * The query can be provided either by using a simple query string as a + * parameter, or by defining Query DSL within the request body. The query is + * optional. When no query is provided, the API uses match_all to + * count all the documents. + *

+ * The count API supports multi-target syntax. You can run a single count API + * search across multiple data streams and indices. + *

+ * The operation is broadcast across all shards. For each shard ID group, a + * replica is chosen and the search is run against it. This means that replicas + * increase the scalability of the count. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count">Documentation * on elastic.co */ @@ -553,12 +1053,132 @@ public CompletableFuture count() { // ----- Endpoint: create /** - * Index a document. Adds a JSON document to the specified data stream or index - * and makes it searchable. If the target is an index and the document already - * exists, the request updates the document and increments its version. + * Create a new document in the index. + *

+ * You can index a new JSON document with the /<target>/_doc/ + * or /<target>/_create/<_id> APIs Using + * _create guarantees that the document is indexed only if it does + * not already exist. It returns a 409 response when a document with a same ID + * already exists in the index. To update an existing document, you must use the + * /<target>/_doc/ API. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To add a document using the + * PUT /<target>/_create/<_id> or + * POST /<target>/_create/<_id> request formats, you + * must have the create_doc, create, + * index, or write index privilege.
  • + *
  • To automatically create a data stream or index with this API request, you + * must have the auto_configure, create_index, or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * Automatically create data streams and indices + *

+ * If the request's target doesn't exist and matches an index template with a + * data_stream definition, the index operation automatically + * creates the data stream. + *

+ * If the target doesn't exist and doesn't match a data stream template, the + * operation automatically creates the index and applies any matching index + * templates. + *

+ * NOTE: Elasticsearch includes several built-in index templates. To avoid + * naming collisions with these templates, refer to index pattern documentation. + *

+ * If no mapping exists, the index operation creates a dynamic mapping. By + * default, new fields and objects are automatically added to the mapping if + * needed. + *

+ * Automatic index creation is controlled by the + * action.auto_create_index setting. If it is true, + * any index can be created automatically. You can modify this setting to + * explicitly allow or block automatic creation of indices that match specified + * patterns or set it to false to turn off automatic index creation + * entirely. Specify a comma-separated list of patterns you want to allow or + * prefix each pattern with + or - to indicate whether + * it should be allowed or blocked. When a list is specified, the default + * behaviour is to disallow. + *

+ * NOTE: The action.auto_create_index setting affects the automatic + * creation of indices only. It does not affect the creation of data streams. + *

+ * Routing + *

+ * By default, shard placement — or routing — is controlled by using a hash of + * the document's ID value. For more explicit control, the value fed into the + * hash function used by the router can be directly specified on a per-operation + * basis using the routing parameter. + *

+ * When setting up explicit mapping, you can also use the _routing + * field to direct the index operation to extract the routing value from the + * document itself. This does come at the (very minimal) cost of an additional + * document parsing pass. If the _routing mapping is defined and + * set to be required, the index operation will fail if no routing value is + * provided or extracted. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Distributed + *

+ * The index operation is directed to the primary shard based on its route and + * performed on the actual node containing this shard. After the primary shard + * completes the operation, if needed, the update is distributed to applicable + * replicas. + *

+ * Active shards + *

+ * To improve the resiliency of writes to the system, indexing operations can be + * configured to wait for a certain number of active shard copies before + * proceeding with the operation. If the requisite number of active shard copies + * are not available, then the write operation must wait and retry, until either + * the requisite shard copies have started or a timeout occurs. By default, + * write operations only wait for the primary shards to be active before + * proceeding (that is to say wait_for_active_shards is + * 1). This default can be overridden in the index settings + * dynamically by setting index.write.wait_for_active_shards. To + * alter this behavior per operation, use the + * wait_for_active_shards request parameter. + *

+ * Valid values are all or any positive integer up to the total number of + * configured copies per shard in the index (which is + * number_of_replicas+1). Specifying a negative value or a number + * greater than the number of shard copies will throw an error. + *

+ * For example, suppose you have a cluster of three nodes, A, B, and C and you + * create an index index with the number of replicas set to 3 (resulting in 4 + * shard copies, one more copy than there are nodes). If you attempt an indexing + * operation, by default the operation will only ensure the primary copy of each + * shard is available before proceeding. This means that even if B and C went + * down and A hosted the primary shard copies, the indexing operation would + * still proceed with only one copy of the data. If + * wait_for_active_shards is set on the request to 3 + * (and all three nodes are up), the indexing operation will require 3 active + * shard copies before proceeding. This requirement should be met because there + * are 3 active nodes in the cluster, each one holding a copy of the shard. + * However, if you set wait_for_active_shards to all + * (or to 4, which is the same in this situation), the indexing + * operation will not proceed as you do not have all 4 copies of each shard + * active in the index. The operation will timeout unless a new node is brought + * up in the cluster to host the fourth copy of the shard. + *

+ * It is important to note that this setting greatly reduces the chances of the + * write operation not writing to the requisite number of shard copies, but it + * does not completely eliminate the possibility, because this check occurs + * before the write operation starts. After the write operation is underway, it + * is still possible for replication to fail on any number of shard copies but + * still succeed on the primary. The _shards section of the API + * response reveals the number of shard copies on which replication succeeded + * and failed. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create">Documentation * on elastic.co */ @@ -570,15 +1190,135 @@ public CompletableFuture create(CreateRequest + * You can index a new JSON document with the /<target>/_doc/ + * or /<target>/_create/<_id> APIs Using + * _create guarantees that the document is indexed only if it does + * not already exist. It returns a 409 response when a document with a same ID + * already exists in the index. To update an existing document, you must use the + * /<target>/_doc/ API. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To add a document using the + * PUT /<target>/_create/<_id> or + * POST /<target>/_create/<_id> request formats, you + * must have the create_doc, create, + * index, or write index privilege.
  • + *
  • To automatically create a data stream or index with this API request, you + * must have the auto_configure, create_index, or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * Automatically create data streams and indices + *

+ * If the request's target doesn't exist and matches an index template with a + * data_stream definition, the index operation automatically + * creates the data stream. + *

+ * If the target doesn't exist and doesn't match a data stream template, the + * operation automatically creates the index and applies any matching index + * templates. + *

+ * NOTE: Elasticsearch includes several built-in index templates. To avoid + * naming collisions with these templates, refer to index pattern documentation. + *

+ * If no mapping exists, the index operation creates a dynamic mapping. By + * default, new fields and objects are automatically added to the mapping if + * needed. + *

+ * Automatic index creation is controlled by the + * action.auto_create_index setting. If it is true, + * any index can be created automatically. You can modify this setting to + * explicitly allow or block automatic creation of indices that match specified + * patterns or set it to false to turn off automatic index creation + * entirely. Specify a comma-separated list of patterns you want to allow or + * prefix each pattern with + or - to indicate whether + * it should be allowed or blocked. When a list is specified, the default + * behavior is to disallow. + *

+ * NOTE: The action.auto_create_index setting affects the automatic + * creation of indices only. It does not affect the creation of data streams. + *

+ * Routing + *

+ * By default, shard placement — or routing — is controlled by using a hash of + * the document's ID value. For more explicit control, the value fed into the + * hash function used by the router can be directly specified on a per-operation + * basis using the routing parameter. + *

+ * When setting up explicit mapping, you can also use the _routing + * field to direct the index operation to extract the routing value from the + * document itself. This does come at the (very minimal) cost of an additional + * document parsing pass. If the _routing mapping is defined and + * set to be required, the index operation will fail if no routing value is + * provided or extracted. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Distributed + *

+ * The index operation is directed to the primary shard based on its route and + * performed on the actual node containing this shard. After the primary shard + * completes the operation, if needed, the update is distributed to applicable + * replicas. + *

+ * Active shards + *

+ * To improve the resiliency of writes to the system, indexing operations can be + * configured to wait for a certain number of active shard copies before + * proceeding with the operation. If the requisite number of active shard copies + * are not available, then the write operation must wait and retry, until either + * the requisite shard copies have started or a timeout occurs. By default, + * write operations only wait for the primary shards to be active before + * proceeding (that is to say wait_for_active_shards is + * 1). This default can be overridden in the index settings + * dynamically by setting index.write.wait_for_active_shards. To + * alter this behavior per operation, use the + * wait_for_active_shards request parameter. + *

+ * Valid values are all or any positive integer up to the total number of + * configured copies per shard in the index (which is + * number_of_replicas+1). Specifying a negative value or a number + * greater than the number of shard copies will throw an error. + *

+ * For example, suppose you have a cluster of three nodes, A, B, and C and you + * create an index index with the number of replicas set to 3 (resulting in 4 + * shard copies, one more copy than there are nodes). If you attempt an indexing + * operation, by default the operation will only ensure the primary copy of each + * shard is available before proceeding. This means that even if B and C went + * down and A hosted the primary shard copies, the indexing operation would + * still proceed with only one copy of the data. If + * wait_for_active_shards is set on the request to 3 + * (and all three nodes are up), the indexing operation will require 3 active + * shard copies before proceeding. This requirement should be met because there + * are 3 active nodes in the cluster, each one holding a copy of the shard. + * However, if you set wait_for_active_shards to all + * (or to 4, which is the same in this situation), the indexing + * operation will not proceed as you do not have all 4 copies of each shard + * active in the index. The operation will timeout unless a new node is brought + * up in the cluster to host the fourth copy of the shard. + *

+ * It is important to note that this setting greatly reduces the chances of the + * write operation not writing to the requisite number of shard copies, but it + * does not completely eliminate the possibility, because this check occurs + * before the write operation starts. After the write operation is underway, it + * is still possible for replication to fail on any number of shard copies but + * still succeed on the primary. The _shards section of the API + * response reveals the number of shard copies on which replication succeeded + * and failed. * * @param fn * a function that initializes a builder to create the * {@link CreateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create">Documentation * on elastic.co */ @@ -590,10 +1330,60 @@ public final CompletableFuture create( // ----- Endpoint: delete /** - * Delete a document. Removes a JSON document from the specified index. + * Delete a document. + *

+ * Remove a JSON document from the specified index. + *

+ * NOTE: You cannot send deletion requests directly to a data stream. To delete + * a document in a data stream, you must target the backing index containing the + * document. + *

+ * Optimistic concurrency control + *

+ * Delete operations can be made conditional and only be performed if the last + * modification to the document was assigned the sequence number and primary + * term specified by the if_seq_no and if_primary_term + * parameters. If a mismatch is detected, the operation will result in a + * VersionConflictException and a status code of 409. + *

+ * Versioning + *

+ * Each document indexed is versioned. When deleting a document, the version can + * be specified to make sure the relevant document you are trying to delete is + * actually being deleted and it has not changed in the meantime. Every write + * operation run on a document, deletes included, causes its version to be + * incremented. The version number of a deleted document remains available for a + * short time after deletion to allow for control of concurrent operations. The + * length of time for which a deleted document's version remains available is + * determined by the index.gc_deletes index setting. + *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to delete a document. + *

+ * If the _routing mapping is set to required and no + * routing value is specified, the delete API throws a + * RoutingMissingException and rejects the request. + *

+ * For example: + * + *

+	 * DELETE /my-index-000001/_doc/1?routing=shard-1
+	 * 
+	 * 
+ *

+ * This request deletes the document with ID 1, but it is routed based on the + * shard-1 routing value. The document is not deleted if the correct routing is not specified. + *

+ * Distributed + *

+ * The delete operation gets hashed into a specific shard ID. It then gets + * redirected into the primary shard within that ID group and replicated (if + * needed) to shard replicas within that ID group. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete">Documentation * on elastic.co */ @@ -605,13 +1395,63 @@ public CompletableFuture delete(DeleteRequest request) { } /** - * Delete a document. Removes a JSON document from the specified index. + * Delete a document. + *

+ * Remove a JSON document from the specified index. + *

+ * NOTE: You cannot send deletion requests directly to a data stream. To delete + * a document in a data stream, you must target the backing index containing the + * document. + *

+ * Optimistic concurrency control + *

+ * Delete operations can be made conditional and only be performed if the last + * modification to the document was assigned the sequence number and primary + * term specified by the if_seq_no and if_primary_term + * parameters. If a mismatch is detected, the operation will result in a + * VersionConflictException and a status code of 409. + *

+ * Versioning + *

+ * Each document indexed is versioned. When deleting a document, the version can + * be specified to make sure the relevant document you are trying to delete is + * actually being deleted and it has not changed in the meantime. Every write + * operation run on a document, deletes included, causes its version to be + * incremented. The version number of a deleted document remains available for a + * short time after deletion to allow for control of concurrent operations. The + * length of time for which a deleted document's version remains available is + * determined by the index.gc_deletes index setting. + *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to delete a document. + *

+ * If the _routing mapping is set to required and no + * routing value is specified, the delete API throws a + * RoutingMissingException and rejects the request. + *

+ * For example: + * + *

+	 * DELETE /my-index-000001/_doc/1?routing=shard-1
+	 * 
+	 * 
+ *

+ * This request deletes the document with ID 1, but it is routed based on the + * shard-1 routing value. The document is not deleted if the correct routing is not specified. + *

+ * Distributed + *

+ * The delete operation gets hashed into a specific shard ID. It then gets + * redirected into the primary shard within that ID group and replicated (if + * needed) to shard replicas within that ID group. * * @param fn * a function that initializes a builder to create the * {@link DeleteRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete">Documentation * on elastic.co */ @@ -623,10 +1463,139 @@ public final CompletableFuture delete( // ----- Endpoint: delete_by_query /** - * Delete documents. Deletes documents that match the specified query. + * Delete documents. + *

+ * Deletes documents that match the specified query. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or alias: + *

    + *
  • read
  • + *
  • delete or write
  • + *
+ *

+ * You can specify the query criteria in the request URI or the request body + * using the same syntax as the search API. When you submit a delete by query + * request, Elasticsearch gets a snapshot of the data stream or index when it + * begins processing the request and deletes matching documents using internal + * versioning. If a document changes between the time that the snapshot is taken + * and the delete operation is processed, it results in a version conflict and + * the delete operation fails. + *

+ * NOTE: Documents with a version equal to 0 cannot be deleted using delete by + * query because internal versioning does not support 0 as a valid version + * number. + *

+ * While processing a delete by query request, Elasticsearch performs multiple + * search requests sequentially to find all of the matching documents to delete. + * A bulk delete request is performed for each batch of matching documents. If a + * search or bulk request is rejected, the requests are retried up to 10 times, + * with exponential back off. If the maximum retry limit is reached, processing + * halts and all failed requests are returned in the response. Any delete + * requests that completed successfully still stick, they are not rolled back. + *

+ * You can opt to count version conflicts instead of halting and returning by + * setting conflicts to proceed. Note that if you opt + * to count version conflicts the operation could attempt to delete more + * documents from the source than max_docs until it has + * successfully deleted max_docs documents, or it has gone through + * every document in the source query. + *

+ * Throttling delete requests + *

+ * To control the rate at which delete by query issues batches of delete + * operations, you can set requests_per_second to any positive + * decimal number. This pads each batch with a wait time to throttle the rate. + * Set requests_per_second to -1 to disable + * throttling. + *

+ * Throttling uses a wait time between batches so that the internal scroll + * requests can be given a timeout that takes the request padding into account. + * The padding time is the difference between the batch size divided by the + * requests_per_second and the time spent writing. By default the + * batch size is 1000, so if requests_per_second is + * set to 500: + * + *

+	 * target_time = 1000 / 500 per second = 2 seconds
+	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+	 * 
+	 * 
+ *

+ * Since the batch is issued as a single _bulk request, large batch + * sizes cause Elasticsearch to create many requests and wait before starting + * the next set. This is "bursty" instead of "smooth". + *

+ * Slicing + *

+ * Delete by query supports sliced scroll to parallelize the delete process. + * This can improve efficiency and provide a convenient way to break the request + * down into smaller parts. + *

+ * Setting slices to auto lets Elasticsearch choose + * the number of slices to use. This setting will use one slice per shard, up to + * a certain limit. If there are multiple source data streams or indices, it + * will choose the number of slices based on the index or backing index with the + * smallest number of shards. Adding slices to the delete by query operation + * creates sub-requests which means it has some quirks: + *

    + *
  • You can see these requests in the tasks APIs. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices only contains + * the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each + * sub-request.
  • + *
  • Due to the nature of slices each sub-request won't get a + * perfectly even portion of the documents. All documents will be addressed, but + * some slices may be larger than others. Expect larger slices to have a more + * even distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed + * proportionally to each sub-request. Combine that with the earlier point about + * distribution being uneven and you should conclude that using + * max_docs with slices might not result in exactly + * max_docs documents being deleted.
  • + *
  • Each sub-request gets a slightly different snapshot of the source data + * stream or index though these are all taken at approximately the same + * time.
  • + *
+ *

+ * If you're slicing manually or otherwise tuning automatic slicing, keep in + * mind that: + *

    + *
  • Query performance is most efficient when the number of slices is equal to + * the number of shards in the index or backing index. If that number is large + * (for example, 500), choose a lower number as too many slices + * hurts performance. Setting slices higher than the number of + * shards generally does not improve efficiency and adds overhead.
  • + *
  • Delete performance scales linearly across available resources with the + * number of slices.
  • + *
+ *

+ * Whether query or delete performance dominates the runtime depends on the + * documents being deleted and cluster resources. + *

+ * Cancel a delete by query operation + *

+ * Any delete by query can be canceled using the task cancel API. For example: + * + *

+	 * POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
+	 * 
+	 * 
+ *

+ * The task ID can be found by using the get tasks API. + *

+ * Cancellation should happen quickly but might take a few seconds. The get task + * status API will continue to list the delete by query task until this task + * checks that it has been cancelled and terminates itself. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query">Documentation * on elastic.co */ @@ -638,13 +1607,142 @@ public CompletableFuture deleteByQuery(DeleteByQueryReque } /** - * Delete documents. Deletes documents that match the specified query. + * Delete documents. + *

+ * Deletes documents that match the specified query. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or alias: + *

    + *
  • read
  • + *
  • delete or write
  • + *
+ *

+ * You can specify the query criteria in the request URI or the request body + * using the same syntax as the search API. When you submit a delete by query + * request, Elasticsearch gets a snapshot of the data stream or index when it + * begins processing the request and deletes matching documents using internal + * versioning. If a document changes between the time that the snapshot is taken + * and the delete operation is processed, it results in a version conflict and + * the delete operation fails. + *

+ * NOTE: Documents with a version equal to 0 cannot be deleted using delete by + * query because internal versioning does not support 0 as a valid version + * number. + *

+ * While processing a delete by query request, Elasticsearch performs multiple + * search requests sequentially to find all of the matching documents to delete. + * A bulk delete request is performed for each batch of matching documents. If a + * search or bulk request is rejected, the requests are retried up to 10 times, + * with exponential back off. If the maximum retry limit is reached, processing + * halts and all failed requests are returned in the response. Any delete + * requests that completed successfully still stick, they are not rolled back. + *

+ * You can opt to count version conflicts instead of halting and returning by + * setting conflicts to proceed. Note that if you opt + * to count version conflicts the operation could attempt to delete more + * documents from the source than max_docs until it has + * successfully deleted max_docs documents, or it has gone through + * every document in the source query. + *

+ * Throttling delete requests + *

+ * To control the rate at which delete by query issues batches of delete + * operations, you can set requests_per_second to any positive + * decimal number. This pads each batch with a wait time to throttle the rate. + * Set requests_per_second to -1 to disable + * throttling. + *

+ * Throttling uses a wait time between batches so that the internal scroll + * requests can be given a timeout that takes the request padding into account. + * The padding time is the difference between the batch size divided by the + * requests_per_second and the time spent writing. By default the + * batch size is 1000, so if requests_per_second is + * set to 500: + * + *

+	 * target_time = 1000 / 500 per second = 2 seconds
+	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+	 * 
+	 * 
+ *

+ * Since the batch is issued as a single _bulk request, large batch + * sizes cause Elasticsearch to create many requests and wait before starting + * the next set. This is "bursty" instead of "smooth". + *

+ * Slicing + *

+ * Delete by query supports sliced scroll to parallelize the delete process. + * This can improve efficiency and provide a convenient way to break the request + * down into smaller parts. + *

+ * Setting slices to auto lets Elasticsearch choose + * the number of slices to use. This setting will use one slice per shard, up to + * a certain limit. If there are multiple source data streams or indices, it + * will choose the number of slices based on the index or backing index with the + * smallest number of shards. Adding slices to the delete by query operation + * creates sub-requests which means it has some quirks: + *

    + *
  • You can see these requests in the tasks APIs. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices only contains + * the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each + * sub-request.
  • + *
  • Due to the nature of slices each sub-request won't get a + * perfectly even portion of the documents. All documents will be addressed, but + * some slices may be larger than others. Expect larger slices to have a more + * even distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed + * proportionally to each sub-request. Combine that with the earlier point about + * distribution being uneven and you should conclude that using + * max_docs with slices might not result in exactly + * max_docs documents being deleted.
  • + *
  • Each sub-request gets a slightly different snapshot of the source data + * stream or index though these are all taken at approximately the same + * time.
  • + *
+ *

+ * If you're slicing manually or otherwise tuning automatic slicing, keep in + * mind that: + *

    + *
  • Query performance is most efficient when the number of slices is equal to + * the number of shards in the index or backing index. If that number is large + * (for example, 500), choose a lower number as too many slices + * hurts performance. Setting slices higher than the number of + * shards generally does not improve efficiency and adds overhead.
  • + *
  • Delete performance scales linearly across available resources with the + * number of slices.
  • + *
+ *

+ * Whether query or delete performance dominates the runtime depends on the + * documents being deleted and cluster resources. + *

+ * Cancel a delete by query operation + *

+ * Any delete by query can be canceled using the task cancel API. For example: + * + *

+	 * POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
+	 * 
+	 * 
+ *

+ * The task ID can be found by using the get tasks API. + *

+ * Cancellation should happen quickly but might take a few seconds. The get task + * status API will continue to list the delete by query task until this task + * checks that it has been cancelled and terminates itself. * * @param fn * a function that initializes a builder to create the * {@link DeleteByQueryRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query">Documentation * on elastic.co */ @@ -664,7 +1762,7 @@ public final CompletableFuture deleteByQuery( * current batch to prevent scroll timeouts. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle">Documentation * on elastic.co */ @@ -688,7 +1786,7 @@ public CompletableFuture deleteByQueryRethrottl * a function that initializes a builder to create the * {@link DeleteByQueryRethrottleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle">Documentation * on elastic.co */ @@ -704,7 +1802,7 @@ public final CompletableFuture deleteByQueryRet * template. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script">Documentation * on elastic.co */ @@ -723,7 +1821,7 @@ public CompletableFuture deleteScript(DeleteScriptRequest * a function that initializes a builder to create the * {@link DeleteScriptRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script">Documentation * on elastic.co */ @@ -735,10 +1833,32 @@ public final CompletableFuture deleteScript( // ----- Endpoint: exists /** - * Check a document. Checks if a specified document exists. + * Check a document. + *

+ * Verify that a document exists. For example, check to see if a document with + * the _id 0 exists: + * + *

+	 * HEAD my-index-000001/_doc/0
+	 * 
+	 * 
+ *

+ * If the document exists, the API returns a status code of + * 200 - OK. If the document doesn’t exist, the API returns + * 404 - Not Found. + *

+ * Versioning support + *

+ * You can use the version parameter to check the document only if + * its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -750,13 +1870,35 @@ public CompletableFuture exists(ExistsRequest request) { } /** - * Check a document. Checks if a specified document exists. + * Check a document. + *

+ * Verify that a document exists. For example, check to see if a document with + * the _id 0 exists: + * + *

+	 * HEAD my-index-000001/_doc/0
+	 * 
+	 * 
+ *

+ * If the document exists, the API returns a status code of + * 200 - OK. If the document doesn’t exist, the API returns + * 404 - Not Found. + *

+ * Versioning support + *

+ * You can use the version parameter to check the document only if + * its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @param fn * a function that initializes a builder to create the * {@link ExistsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -768,11 +1910,19 @@ public final CompletableFuture exists( // ----- Endpoint: exists_source /** - * Check for a document source. Checks if a document's _source is - * stored. + * Check for a document source. + *

+ * Check whether a document source exists in an index. For example: + * + *

+	 * HEAD my-index-000001/_source/1
+	 * 
+	 * 
+ *

+ * A document's source is not available if it is disabled in the mapping. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -784,14 +1934,22 @@ public CompletableFuture existsSource(ExistsSourceRequest reque } /** - * Check for a document source. Checks if a document's _source is - * stored. + * Check for a document source. + *

+ * Check whether a document source exists in an index. For example: + * + *

+	 * HEAD my-index-000001/_source/1
+	 * 
+	 * 
+ *

+ * A document's source is not available if it is disabled in the mapping. * * @param fn * a function that initializes a builder to create the * {@link ExistsSourceRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -803,11 +1961,12 @@ public final CompletableFuture existsSource( // ----- Endpoint: explain /** - * Explain a document match result. Returns information about why a specific - * document matches, or doesn’t match, a query. + * Explain a document match result. Get information about why a specific + * document matches, or doesn't match, a query. It computes a score explanation + * for a query and a specific document. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain">Documentation * on elastic.co */ @@ -822,14 +1981,15 @@ public CompletableFuture> explain(Explain } /** - * Explain a document match result. Returns information about why a specific - * document matches, or doesn’t match, a query. + * Explain a document match result. Get information about why a specific + * document matches, or doesn't match, a query. It computes a score explanation + * for a query and a specific document. * * @param fn * a function that initializes a builder to create the * {@link ExplainRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain">Documentation * on elastic.co */ @@ -839,11 +1999,12 @@ public final CompletableFuture> explain( } /** - * Explain a document match result. Returns information about why a specific - * document matches, or doesn’t match, a query. + * Explain a document match result. Get information about why a specific + * document matches, or doesn't match, a query. It computes a score explanation + * for a query and a specific document. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain">Documentation * on elastic.co */ @@ -858,14 +2019,15 @@ public CompletableFuture> explain(Explain } /** - * Explain a document match result. Returns information about why a specific - * document matches, or doesn’t match, a query. + * Explain a document match result. Get information about why a specific + * document matches, or doesn't match, a query. It computes a score explanation + * for a query and a specific document. * * @param fn * a function that initializes a builder to create the * {@link ExplainRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain">Documentation * on elastic.co */ @@ -887,7 +2049,7 @@ public final CompletableFuture> explain( * field that belongs to the keyword family. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps">Documentation * on elastic.co */ @@ -912,7 +2074,7 @@ public CompletableFuture fieldCaps(FieldCapsRequest request) * a function that initializes a builder to create the * {@link FieldCapsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps">Documentation * on elastic.co */ @@ -932,7 +2094,7 @@ public final CompletableFuture fieldCaps( * field that belongs to the keyword family. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps">Documentation * on elastic.co */ @@ -944,11 +2106,81 @@ public CompletableFuture fieldCaps() { // ----- Endpoint: get /** - * Get a document by its ID. Retrieves the document with the specified ID from - * an index. + * Get a document by its ID. + *

+ * Get a document and its source or stored fields from an index. + *

+ * By default, this API is realtime and is not affected by the refresh rate of + * the index (when data will become visible for search). In the case where + * stored fields are requested with the stored_fields parameter and + * the document has been updated but is not yet refreshed, the API will have to + * parse and analyze the source to extract the stored fields. To turn off + * realtime behavior, set the realtime parameter to false. + *

+ * Source filtering + *

+ * By default, the API returns the contents of the _source field + * unless you have used the stored_fields parameter or the + * _source field is turned off. You can turn off + * _source retrieval by using the _source parameter: + * + *

+	 * GET my-index-000001/_doc/0?_source=false
+	 * 
+	 * 
+ *

+ * If you only need one or two fields from the _source, use the + * _source_includes or _source_excludes parameters to + * include or filter out particular fields. This can be helpful with large + * documents where partial retrieval can save on network overhead. Both + * parameters take a comma-separated list of fields or wildcard expressions. For + * example: + * + *

+	 * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ *

+ * If you only want to specify includes, you can use a shorter notation: + * + *

+	 * GET my-index-000001/_doc/0?_source=*.id
+	 * 
+	 * 
+ *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to retrieve a document. For example: + * + *

+	 * GET my-index-000001/_doc/2?routing=user1
+	 * 
+	 * 
+ *

+ * This request gets the document with ID 2, but it is routed based on the user. + * The document is not fetched if the correct routing is not specified. + *

+ * Distributed + *

+ * The GET operation is hashed into a specific shard ID. It is then redirected + * to one of the replicas within that shard ID and returns the result. The + * replicas are the primary shard and its replicas within that shard ID group. + * This means that the more replicas you have, the better your GET scaling will + * be. + *

+ * Versioning support + *

+ * You can use the version parameter to retrieve the document only + * if its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -963,14 +2195,84 @@ public CompletableFuture> get(GetRequest requ } /** - * Get a document by its ID. Retrieves the document with the specified ID from - * an index. + * Get a document by its ID. + *

+ * Get a document and its source or stored fields from an index. + *

+ * By default, this API is realtime and is not affected by the refresh rate of + * the index (when data will become visible for search). In the case where + * stored fields are requested with the stored_fields parameter and + * the document has been updated but is not yet refreshed, the API will have to + * parse and analyze the source to extract the stored fields. To turn off + * realtime behavior, set the realtime parameter to false. + *

+ * Source filtering + *

+ * By default, the API returns the contents of the _source field + * unless you have used the stored_fields parameter or the + * _source field is turned off. You can turn off + * _source retrieval by using the _source parameter: + * + *

+	 * GET my-index-000001/_doc/0?_source=false
+	 * 
+	 * 
+ *

+ * If you only need one or two fields from the _source, use the + * _source_includes or _source_excludes parameters to + * include or filter out particular fields. This can be helpful with large + * documents where partial retrieval can save on network overhead. Both + * parameters take a comma-separated list of fields or wildcard expressions. For + * example: + * + *

+	 * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ *

+ * If you only want to specify includes, you can use a shorter notation: + * + *

+	 * GET my-index-000001/_doc/0?_source=*.id
+	 * 
+	 * 
+ *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to retrieve a document. For example: + * + *

+	 * GET my-index-000001/_doc/2?routing=user1
+	 * 
+	 * 
+ *

+ * This request gets the document with ID 2, but it is routed based on the user. + * The document is not fetched if the correct routing is not specified. + *

+ * Distributed + *

+ * The GET operation is hashed into a specific shard ID. It is then redirected + * to one of the replicas within that shard ID and returns the result. The + * replicas are the primary shard and its replicas within that shard ID group. + * This means that the more replicas you have, the better your GET scaling will + * be. + *

+ * Versioning support + *

+ * You can use the version parameter to retrieve the document only + * if its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @param fn * a function that initializes a builder to create the * {@link GetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -980,32 +2282,172 @@ public final CompletableFuture> get( } /** - * Get a document by its ID. Retrieves the document with the specified ID from - * an index. + * Get a document by its ID. + *

+ * Get a document and its source or stored fields from an index. + *

+ * By default, this API is realtime and is not affected by the refresh rate of + * the index (when data will become visible for search). In the case where + * stored fields are requested with the stored_fields parameter and + * the document has been updated but is not yet refreshed, the API will have to + * parse and analyze the source to extract the stored fields. To turn off + * realtime behavior, set the realtime parameter to false. + *

+ * Source filtering + *

+ * By default, the API returns the contents of the _source field + * unless you have used the stored_fields parameter or the + * _source field is turned off. You can turn off + * _source retrieval by using the _source parameter: + * + *

+	 * GET my-index-000001/_doc/0?_source=false
+	 * 
+	 * 
+ *

+ * If you only need one or two fields from the _source, use the + * _source_includes or _source_excludes parameters to + * include or filter out particular fields. This can be helpful with large + * documents where partial retrieval can save on network overhead. Both + * parameters take a comma-separated list of fields or wildcard expressions. For + * example: + * + *

+	 * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ *

+ * If you only want to specify includes, you can use a shorter notation: + * + *

+	 * GET my-index-000001/_doc/0?_source=*.id
+	 * 
+	 * 
+ *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to retrieve a document. For example: + * + *

+	 * GET my-index-000001/_doc/2?routing=user1
+	 * 
+	 * 
+ *

+ * This request gets the document with ID 2, but it is routed based on the user. + * The document is not fetched if the correct routing is not specified. + *

+ * Distributed + *

+ * The GET operation is hashed into a specific shard ID. It is then redirected + * to one of the replicas within that shard ID and returns the result. The + * replicas are the primary shard and its replicas within that shard ID group. + * This means that the more replicas you have, the better your GET scaling will + * be. + *

+ * Versioning support + *

+ * You can use the version parameter to retrieve the document only + * if its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture> get(GetRequest request, Type tDocumentType) { + @SuppressWarnings("unchecked") + JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) GetRequest._ENDPOINT; + endpoint = new EndpointWithResponseMapperAttr<>(endpoint, + "co.elastic.clients:Deserializer:_global.get.Response.TDocument", getDeserializer(tDocumentType)); + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Get a document by its ID. + *

+ * Get a document and its source or stored fields from an index. + *

+ * By default, this API is realtime and is not affected by the refresh rate of + * the index (when data will become visible for search). In the case where + * stored fields are requested with the stored_fields parameter and + * the document has been updated but is not yet refreshed, the API will have to + * parse and analyze the source to extract the stored fields. To turn off + * realtime behavior, set the realtime parameter to false. + *

+ * Source filtering + *

+ * By default, the API returns the contents of the _source field + * unless you have used the stored_fields parameter or the + * _source field is turned off. You can turn off + * _source retrieval by using the _source parameter: + * + *

+	 * GET my-index-000001/_doc/0?_source=false
+	 * 
+	 * 
+ *

+ * If you only need one or two fields from the _source, use the + * _source_includes or _source_excludes parameters to + * include or filter out particular fields. This can be helpful with large + * documents where partial retrieval can save on network overhead. Both + * parameters take a comma-separated list of fields or wildcard expressions. For + * example: + * + *

+	 * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ *

+ * If you only want to specify includes, you can use a shorter notation: + * + *

+	 * GET my-index-000001/_doc/0?_source=*.id
+	 * 
+	 * 
+ *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to retrieve a document. For example: * - * @see Documentation - * on elastic.co - */ - - public CompletableFuture> get(GetRequest request, Type tDocumentType) { - @SuppressWarnings("unchecked") - JsonEndpoint, ErrorResponse> endpoint = (JsonEndpoint, ErrorResponse>) GetRequest._ENDPOINT; - endpoint = new EndpointWithResponseMapperAttr<>(endpoint, - "co.elastic.clients:Deserializer:_global.get.Response.TDocument", getDeserializer(tDocumentType)); - - return this.transport.performRequestAsync(request, endpoint, this.transportOptions); - } - - /** - * Get a document by its ID. Retrieves the document with the specified ID from - * an index. + *

+	 * GET my-index-000001/_doc/2?routing=user1
+	 * 
+	 * 
+ *

+ * This request gets the document with ID 2, but it is routed based on the user. + * The document is not fetched if the correct routing is not specified. + *

+ * Distributed + *

+ * The GET operation is hashed into a specific shard ID. It is then redirected + * to one of the replicas within that shard ID and returns the result. The + * replicas are the primary shard and its replicas within that shard ID group. + * This means that the more replicas you have, the better your GET scaling will + * be. + *

+ * Versioning support + *

+ * You can use the version parameter to retrieve the document only + * if its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @param fn * a function that initializes a builder to create the * {@link GetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -1021,7 +2463,7 @@ public final CompletableFuture> get( * template. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script">Documentation * on elastic.co */ @@ -1040,7 +2482,7 @@ public CompletableFuture getScript(GetScriptRequest request) * a function that initializes a builder to create the * {@link GetScriptRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script">Documentation * on elastic.co */ @@ -1057,7 +2499,7 @@ public final CompletableFuture getScript( * Get a list of supported script contexts and their methods. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context">Documentation * on elastic.co */ public CompletableFuture getScriptContext() { @@ -1073,7 +2515,7 @@ public CompletableFuture getScriptContext() { * Get a list of available script types, languages, and contexts. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages">Documentation * on elastic.co */ public CompletableFuture getScriptLanguages() { @@ -1084,10 +2526,25 @@ public CompletableFuture getScriptLanguages() { // ----- Endpoint: get_source /** - * Get a document's source. Returns the source of a document. + * Get a document's source. + *

+ * Get the source of a document. For example: * + *

+	 * GET my-index-000001/_source/1
+	 * 
+	 * 
+ *

+ * You can use the source filtering parameters to control which parts of the + * _source are returned: + * + *

+	 * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -1103,13 +2560,28 @@ public CompletableFuture> getSource(Get } /** - * Get a document's source. Returns the source of a document. + * Get a document's source. + *

+ * Get the source of a document. For example: + * + *

+	 * GET my-index-000001/_source/1
+	 * 
+	 * 
+ *

+ * You can use the source filtering parameters to control which parts of the + * _source are returned: * + *

+	 * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link GetSourceRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -1119,10 +2591,25 @@ public final CompletableFuture> getSour } /** - * Get a document's source. Returns the source of a document. + * Get a document's source. + *

+ * Get the source of a document. For example: + * + *

+	 * GET my-index-000001/_source/1
+	 * 
+	 * 
+ *

+ * You can use the source filtering parameters to control which parts of the + * _source are returned: * + *

+	 * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -1138,13 +2625,28 @@ public CompletableFuture> getSource(Get } /** - * Get a document's source. Returns the source of a document. + * Get a document's source. + *

+ * Get the source of a document. For example: + * + *

+	 * GET my-index-000001/_source/1
+	 * 
+	 * 
+ *

+ * You can use the source filtering parameters to control which parts of the + * _source are returned: * + *

+	 * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link GetSourceRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -1186,7 +2688,7 @@ public final CompletableFuture> getSour * false to disable the more expensive analysis logic. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report">Documentation * on elastic.co */ @@ -1231,7 +2733,7 @@ public CompletableFuture healthReport(HealthReportRequest * a function that initializes a builder to create the * {@link HealthReportRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report">Documentation * on elastic.co */ @@ -1271,7 +2773,7 @@ public final CompletableFuture healthReport( * false to disable the more expensive analysis logic. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report">Documentation * on elastic.co */ @@ -1283,12 +2785,198 @@ public CompletableFuture healthReport() { // ----- Endpoint: index /** - * Index a document. Adds a JSON document to the specified data stream or index - * and makes it searchable. If the target is an index and the document already - * exists, the request updates the document and increments its version. + * Create or update a document in an index. + *

+ * Add a JSON document to the specified data stream or index and make it + * searchable. If the target is an index and the document already exists, the + * request updates the document and increments its version. + *

+ * NOTE: You cannot use this API to send update requests for existing documents + * in a data stream. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To add or overwrite a document using the + * PUT /<target>/_doc/<_id> request format, you must + * have the create, index, or write index + * privilege.
  • + *
  • To add a document using the POST /<target>/_doc/ + * request format, you must have the create_doc, + * create, index, or write index + * privilege.
  • + *
  • To automatically create a data stream or index with this API request, you + * must have the auto_configure, create_index, or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * NOTE: Replica shards might not all be started when an indexing operation + * returns successfully. By default, only the primary is required. Set + * wait_for_active_shards to change this default behavior. + *

+ * Automatically create data streams and indices + *

+ * If the request's target doesn't exist and matches an index template with a + * data_stream definition, the index operation automatically + * creates the data stream. + *

+ * If the target doesn't exist and doesn't match a data stream template, the + * operation automatically creates the index and applies any matching index + * templates. + *

+ * NOTE: Elasticsearch includes several built-in index templates. To avoid + * naming collisions with these templates, refer to index pattern documentation. + *

+ * If no mapping exists, the index operation creates a dynamic mapping. By + * default, new fields and objects are automatically added to the mapping if + * needed. + *

+ * Automatic index creation is controlled by the + * action.auto_create_index setting. If it is true, + * any index can be created automatically. You can modify this setting to + * explicitly allow or block automatic creation of indices that match specified + * patterns or set it to false to turn off automatic index creation + * entirely. Specify a comma-separated list of patterns you want to allow or + * prefix each pattern with + or - to indicate whether + * it should be allowed or blocked. When a list is specified, the default + * behaviour is to disallow. + *

+ * NOTE: The action.auto_create_index setting affects the automatic + * creation of indices only. It does not affect the creation of data streams. + *

+ * Optimistic concurrency control + *

+ * Index operations can be made conditional and only be performed if the last + * modification to the document was assigned the sequence number and primary + * term specified by the if_seq_no and if_primary_term + * parameters. If a mismatch is detected, the operation will result in a + * VersionConflictException and a status code of 409. + *

+ * Routing + *

+ * By default, shard placement — or routing — is controlled by using a hash of + * the document's ID value. For more explicit control, the value fed into the + * hash function used by the router can be directly specified on a per-operation + * basis using the routing parameter. + *

+ * When setting up explicit mapping, you can also use the _routing + * field to direct the index operation to extract the routing value from the + * document itself. This does come at the (very minimal) cost of an additional + * document parsing pass. If the _routing mapping is defined and + * set to be required, the index operation will fail if no routing value is + * provided or extracted. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Distributed + *

+ * The index operation is directed to the primary shard based on its route and + * performed on the actual node containing this shard. After the primary shard + * completes the operation, if needed, the update is distributed to applicable + * replicas. + *

+ * Active shards + *

+ * To improve the resiliency of writes to the system, indexing operations can be + * configured to wait for a certain number of active shard copies before + * proceeding with the operation. If the requisite number of active shard copies + * are not available, then the write operation must wait and retry, until either + * the requisite shard copies have started or a timeout occurs. By default, + * write operations only wait for the primary shards to be active before + * proceeding (that is to say wait_for_active_shards is + * 1). This default can be overridden in the index settings + * dynamically by setting index.write.wait_for_active_shards. To + * alter this behavior per operation, use the + * wait_for_active_shards request parameter. + *

+ * Valid values are all or any positive integer up to the total number of + * configured copies per shard in the index (which is + * number_of_replicas+1). Specifying a negative value or a number + * greater than the number of shard copies will throw an error. + *

+ * For example, suppose you have a cluster of three nodes, A, B, and C and you + * create an index index with the number of replicas set to 3 (resulting in 4 + * shard copies, one more copy than there are nodes). If you attempt an indexing + * operation, by default the operation will only ensure the primary copy of each + * shard is available before proceeding. This means that even if B and C went + * down and A hosted the primary shard copies, the indexing operation would + * still proceed with only one copy of the data. If + * wait_for_active_shards is set on the request to 3 + * (and all three nodes are up), the indexing operation will require 3 active + * shard copies before proceeding. This requirement should be met because there + * are 3 active nodes in the cluster, each one holding a copy of the shard. + * However, if you set wait_for_active_shards to all + * (or to 4, which is the same in this situation), the indexing + * operation will not proceed as you do not have all 4 copies of each shard + * active in the index. The operation will timeout unless a new node is brought + * up in the cluster to host the fourth copy of the shard. + *

+ * It is important to note that this setting greatly reduces the chances of the + * write operation not writing to the requisite number of shard copies, but it + * does not completely eliminate the possibility, because this check occurs + * before the write operation starts. After the write operation is underway, it + * is still possible for replication to fail on any number of shard copies but + * still succeed on the primary. The _shards section of the API + * response reveals the number of shard copies on which replication succeeded + * and failed. + *

+ * No operation (noop) updates + *

+ * When updating a document by using this API, a new version of the document is + * always created even if the document hasn't changed. If this isn't acceptable, + * use the _update API with detect_noop set to + * true. The detect_noop option isn't available on + * this API because it doesn't fetch the old source and isn't able to compare it + * against the new source. + *

+ * There isn't a definitive rule for when noop updates aren't acceptable. It's a + * combination of lots of factors like how frequently your data source sends + * updates that are actually noops and how many queries per second Elasticsearch + * runs on the shard receiving the updates. + *

+ * Versioning + *

+ * Each indexed document is given a version number. By default, internal + * versioning is used that starts at 1 and increments with each update, deletes + * included. Optionally, the version number can be set to an external value (for + * example, if maintained in a database). To enable this functionality, + * version_type should be set to external. The value + * provided must be a numeric, long value greater than or equal to 0, and less + * than around 9.2e+18. + *

+ * NOTE: Versioning is completely real time, and is not affected by the near + * real time aspects of search operations. If no version is provided, the + * operation runs without any version checks. + *

+ * When using the external version type, the system checks to see if the version + * number passed to the index request is greater than the version of the + * currently stored document. If true, the document will be indexed and the new + * version number used. If the value provided is less than or equal to the + * stored document's version number, a version conflict will occur and the index + * operation will fail. For example: * + *

+	 * PUT my-index-000001/_doc/1?version=2&version_type=external
+	 * {
+	 *   "user": {
+	 *     "id": "elkbee"
+	 *   }
+	 * }
+	 *
+	 * In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
+	 * If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
+	 *
+	 * A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
+	 * Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
+	 * 
+	 * 
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create">Documentation * on elastic.co */ @@ -1300,15 +2988,201 @@ public CompletableFuture index(IndexRequest + * Add a JSON document to the specified data stream or index and make it + * searchable. If the target is an index and the document already exists, the + * request updates the document and increments its version. + *

+ * NOTE: You cannot use this API to send update requests for existing documents + * in a data stream. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To add or overwrite a document using the + * PUT /<target>/_doc/<_id> request format, you must + * have the create, index, or write index + * privilege.
  • + *
  • To add a document using the POST /<target>/_doc/ + * request format, you must have the create_doc, + * create, index, or write index + * privilege.
  • + *
  • To automatically create a data stream or index with this API request, you + * must have the auto_configure, create_index, or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * NOTE: Replica shards might not all be started when an indexing operation + * returns successfully. By default, only the primary is required. Set + * wait_for_active_shards to change this default behavior. + *

+ * Automatically create data streams and indices + *

+ * If the request's target doesn't exist and matches an index template with a + * data_stream definition, the index operation automatically + * creates the data stream. + *

+ * If the target doesn't exist and doesn't match a data stream template, the + * operation automatically creates the index and applies any matching index + * templates. + *

+ * NOTE: Elasticsearch includes several built-in index templates. To avoid + * naming collisions with these templates, refer to index pattern documentation. + *

+ * If no mapping exists, the index operation creates a dynamic mapping. By + * default, new fields and objects are automatically added to the mapping if + * needed. + *

+ * Automatic index creation is controlled by the + * action.auto_create_index setting. If it is true, + * any index can be created automatically. You can modify this setting to + * explicitly allow or block automatic creation of indices that match specified + * patterns or set it to false to turn off automatic index creation + * entirely. Specify a comma-separated list of patterns you want to allow or + * prefix each pattern with + or - to indicate whether + * it should be allowed or blocked. When a list is specified, the default + * behaviour is to disallow. + *

+ * NOTE: The action.auto_create_index setting affects the automatic + * creation of indices only. It does not affect the creation of data streams. + *

+ * Optimistic concurrency control + *

+ * Index operations can be made conditional and only be performed if the last + * modification to the document was assigned the sequence number and primary + * term specified by the if_seq_no and if_primary_term + * parameters. If a mismatch is detected, the operation will result in a + * VersionConflictException and a status code of 409. + *

+ * Routing + *

+ * By default, shard placement — or routing — is controlled by using a hash of + * the document's ID value. For more explicit control, the value fed into the + * hash function used by the router can be directly specified on a per-operation + * basis using the routing parameter. + *

+ * When setting up explicit mapping, you can also use the _routing + * field to direct the index operation to extract the routing value from the + * document itself. This does come at the (very minimal) cost of an additional + * document parsing pass. If the _routing mapping is defined and + * set to be required, the index operation will fail if no routing value is + * provided or extracted. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Distributed + *

+ * The index operation is directed to the primary shard based on its route and + * performed on the actual node containing this shard. After the primary shard + * completes the operation, if needed, the update is distributed to applicable + * replicas. + *

+ * Active shards + *

+ * To improve the resiliency of writes to the system, indexing operations can be + * configured to wait for a certain number of active shard copies before + * proceeding with the operation. If the requisite number of active shard copies + * are not available, then the write operation must wait and retry, until either + * the requisite shard copies have started or a timeout occurs. By default, + * write operations only wait for the primary shards to be active before + * proceeding (that is to say wait_for_active_shards is + * 1). This default can be overridden in the index settings + * dynamically by setting index.write.wait_for_active_shards. To + * alter this behavior per operation, use the + * wait_for_active_shards request parameter. + *

+ * Valid values are all or any positive integer up to the total number of + * configured copies per shard in the index (which is + * number_of_replicas+1). Specifying a negative value or a number + * greater than the number of shard copies will throw an error. + *

 + * For example, suppose you have a cluster of three nodes, A, B, and C and you + * create an index named index with the number of replicas set to 3 (resulting in 4 + * shard copies, one more copy than there are nodes). If you attempt an indexing + * operation, by default the operation will only ensure the primary copy of each + * shard is available before proceeding. This means that even if B and C went + * down and A hosted the primary shard copies, the indexing operation would + * still proceed with only one copy of the data. If + * wait_for_active_shards is set on the request to 3 + * (and all three nodes are up), the indexing operation will require 3 active + * shard copies before proceeding. This requirement should be met because there + * are 3 active nodes in the cluster, each one holding a copy of the shard. + * However, if you set wait_for_active_shards to all + * (or to 4, which is the same in this situation), the indexing + * operation will not proceed as you do not have all 4 copies of each shard + * active in the index. The operation will timeout unless a new node is brought + * up in the cluster to host the fourth copy of the shard. + *

+ * It is important to note that this setting greatly reduces the chances of the + * write operation not writing to the requisite number of shard copies, but it + * does not completely eliminate the possibility, because this check occurs + * before the write operation starts. After the write operation is underway, it + * is still possible for replication to fail on any number of shard copies but + * still succeed on the primary. The _shards section of the API + * response reveals the number of shard copies on which replication succeeded + * and failed. + *

+ * No operation (noop) updates + *

 + * When updating a document by using this API, a new version of the document is + * always created even if the document hasn't changed. If this isn't acceptable, + * use the _update API with detect_noop set to + * true. The detect_noop option isn't available on + * this API because it doesn't fetch the old source and isn't able to compare it + * against the new source. + *

+ * There isn't a definitive rule for when noop updates aren't acceptable. It's a + * combination of lots of factors like how frequently your data source sends + * updates that are actually noops and how many queries per second Elasticsearch + * runs on the shard receiving the updates. + *

+ * Versioning + *

+ * Each indexed document is given a version number. By default, internal + * versioning is used that starts at 1 and increments with each update, deletes + * included. Optionally, the version number can be set to an external value (for + * example, if maintained in a database). To enable this functionality, + * version_type should be set to external. The value + * provided must be a numeric, long value greater than or equal to 0, and less + * than around 9.2e+18. + *

+ * NOTE: Versioning is completely real time, and is not affected by the near + * real time aspects of search operations. If no version is provided, the + * operation runs without any version checks. + *

+ * When using the external version type, the system checks to see if the version + * number passed to the index request is greater than the version of the + * currently stored document. If true, the document will be indexed and the new + * version number used. If the value provided is less than or equal to the + * stored document's version number, a version conflict will occur and the index + * operation will fail. For example: * + *

+	 * PUT my-index-000001/_doc/1?version=2&version_type=external
+	 * {
+	 *   "user": {
+	 *     "id": "elkbee"
+	 *   }
+	 * }
+	 *
+	 * In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
+	 * If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
+	 *
+	 * A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
+	 * Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link IndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create">Documentation * on elastic.co */ @@ -1323,7 +3197,7 @@ public final CompletableFuture index( * Get cluster info. Get basic build, version, and cluster information. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info">Documentation * on elastic.co */ public CompletableFuture info() { @@ -1349,9 +3223,21 @@ public CompletableFuture info() { *

* The kNN search API supports restricting the search using a filter. The search * will return the top k documents that also match the filter query. - * + *

+ * A kNN search response has the exact same structure as a search API response. + * However, certain sections have a meaning specific to kNN search: + *

     + * <ul>
  + * <li>The document _score is determined by the similarity between + * the query and document vector.</li>
  + * <li>The hits.total object contains the total number of nearest + * neighbor candidates considered, which is + * num_candidates * num_shards. The + * hits.total.relation will always be eq, indicating + * an exact value.</li>
 + * </ul>

* The kNN search API supports restricting the search using a filter. The search * will return the top k documents that also match the filter query. - * + *

+ * A kNN search response has the exact same structure as a search API response. + * However, certain sections have a meaning specific to kNN search: + *

     + * <ul>
  + * <li>The document _score is determined by the similarity between + * the query and document vector.</li>
  + * <li>The hits.total object contains the total number of nearest + * neighbor candidates considered, which is + * num_candidates * num_shards. The + * hits.total.relation will always be eq, indicating + * an exact value.</li>
 + * </ul>

* The kNN search API supports restricting the search using a filter. The search * will return the top k documents that also match the filter query. - * + *

+ * A kNN search response has the exact same structure as a search API response. + * However, certain sections have a meaning specific to kNN search: + *

     + * <ul>
  + * <li>The document _score is determined by the similarity between + * the query and document vector.</li>
  + * <li>The hits.total object contains the total number of nearest + * neighbor candidates considered, which is + * num_candidates * num_shards. The + * hits.total.relation will always be eq, indicating + * an exact value.</li>
 + * </ul>

* The kNN search API supports restricting the search using a filter. The search * will return the top k documents that also match the filter query. - * + *

+ * A kNN search response has the exact same structure as a search API response. + * However, certain sections have a meaning specific to kNN search: + *

     + * <ul>
  + * <li>The document _score is determined by the similarity between + * the query and document vector.</li>
  + * <li>The hits.total object contains the total number of nearest + * neighbor candidates considered, which is + * num_candidates * num_shards. The + * hits.total.relation will always be eq, indicating + * an exact value.</li>
 + * </ul>

+ * Filter source fields + *

+ * By default, the _source field is returned for every document (if + * stored). Use the _source and _source_include or + * source_exclude attributes to filter what fields are returned for + * a particular document. You can include the _source, + * _source_includes, and _source_excludes query + * parameters in the request URI to specify the defaults to use when there are + * no per-document instructions. + *

+ * Get stored fields + *

+ * Use the stored_fields attribute to specify the set of stored + * fields you want to retrieve. Any requested fields that are not stored are + * ignored. You can include the stored_fields query parameter in + * the request URI to specify the defaults to use when there are no per-document + * instructions. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget">Documentation * on elastic.co */ @@ -1494,12 +3434,30 @@ public CompletableFuture> mget(MgetRequest r * index in the request URI, you only need to specify the document IDs in the * request body. To ensure fast responses, this multi get (mget) API responds * with partial results if one or more shards fail. + *

+ * Filter source fields + *

+ * By default, the _source field is returned for every document (if + * stored). Use the _source and _source_include or + * source_exclude attributes to filter what fields are returned for + * a particular document. You can include the _source, + * _source_includes, and _source_excludes query + * parameters in the request URI to specify the defaults to use when there are + * no per-document instructions. + *

+ * Get stored fields + *

+ * Use the stored_fields attribute to specify the set of stored + * fields you want to retrieve. Any requested fields that are not stored are + * ignored. You can include the stored_fields query parameter in + * the request URI to specify the defaults to use when there are no per-document + * instructions. * * @param fn * a function that initializes a builder to create the * {@link MgetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget">Documentation * on elastic.co */ @@ -1515,9 +3473,27 @@ public final CompletableFuture> mget( * index in the request URI, you only need to specify the document IDs in the * request body. To ensure fast responses, this multi get (mget) API responds * with partial results if one or more shards fail. + *

+ * Filter source fields + *

+ * By default, the _source field is returned for every document (if + * stored). Use the _source and _source_include or + * source_exclude attributes to filter what fields are returned for + * a particular document. You can include the _source, + * _source_includes, and _source_excludes query + * parameters in the request URI to specify the defaults to use when there are + * no per-document instructions. + *

+ * Get stored fields + *

+ * Use the stored_fields attribute to specify the set of stored + * fields you want to retrieve. Any requested fields that are not stored are + * ignored. You can include the stored_fields query parameter in + * the request URI to specify the defaults to use when there are no per-document + * instructions. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget">Documentation * on elastic.co */ @@ -1537,12 +3513,30 @@ public CompletableFuture> mget(MgetRequest r * index in the request URI, you only need to specify the document IDs in the * request body. To ensure fast responses, this multi get (mget) API responds * with partial results if one or more shards fail. + *

+ * Filter source fields + *

+ * By default, the _source field is returned for every document (if + * stored). Use the _source and _source_include or + * source_exclude attributes to filter what fields are returned for + * a particular document. You can include the _source, + * _source_includes, and _source_excludes query + * parameters in the request URI to specify the defaults to use when there are + * no per-document instructions. + *

+ * Get stored fields + *

+ * Use the stored_fields attribute to specify the set of stored + * fields you want to retrieve. Any requested fields that are not stored are + * ignored. You can include the stored_fields query parameter in + * the request URI to specify the defaults to use when there are no per-document + * instructions. * * @param fn * a function that initializes a builder to create the * {@link MgetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget">Documentation * on elastic.co */ @@ -1577,7 +3571,7 @@ public final CompletableFuture> mget( * application/x-ndjson. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch">Documentation * on elastic.co */ @@ -1618,7 +3612,7 @@ public CompletableFuture> msearch(Msearch * a function that initializes a builder to create the * {@link MsearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch">Documentation * on elastic.co */ @@ -1651,7 +3645,7 @@ public final CompletableFuture> msearch( * application/x-ndjson. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch">Documentation * on elastic.co */ @@ -1692,7 +3686,7 @@ public CompletableFuture> msearch(Msearch * a function that initializes a builder to create the * {@link MsearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch">Documentation * on elastic.co */ @@ -1705,9 +3699,25 @@ public final CompletableFuture> msearch( /** * Run multiple templated searches. + *

+ * Run multiple templated searches with a single request. If you are providing a + * text file or text input to curl, use the + * --data-binary flag instead of -d to preserve + * newlines. For example: * + *

+	 * $ cat requests
+	 * { "index": "my-index" }
+	 * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
+	 * { "index": "my-other-index" }
+	 * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
+	 *
+	 * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
+	 * 
+	 * 
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template">Documentation * on elastic.co */ @@ -1724,12 +3734,28 @@ public CompletableFuture> msearch /** * Run multiple templated searches. + *

+ * Run multiple templated searches with a single request. If you are providing a + * text file or text input to curl, use the + * --data-binary flag instead of -d to preserve + * newlines. For example: * + *

+	 * $ cat requests
+	 * { "index": "my-index" }
+	 * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
+	 * { "index": "my-other-index" }
+	 * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
+	 *
+	 * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link MsearchTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template">Documentation * on elastic.co */ @@ -1741,9 +3767,25 @@ public final CompletableFuture> m /** * Run multiple templated searches. + *

+ * Run multiple templated searches with a single request. If you are providing a + * text file or text input to curl, use the + * --data-binary flag instead of -d to preserve + * newlines. For example: * + *

+	 * $ cat requests
+	 * { "index": "my-index" }
+	 * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
+	 * { "index": "my-other-index" }
+	 * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
+	 *
+	 * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
+	 * 
+	 * 
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template">Documentation * on elastic.co */ @@ -1760,12 +3802,28 @@ public CompletableFuture> msearch /** * Run multiple templated searches. + *

+ * Run multiple templated searches with a single request. If you are providing a + * text file or text input to curl, use the + * --data-binary flag instead of -d to preserve + * newlines. For example: * + *

+	 * $ cat requests
+	 * { "index": "my-index" }
+	 * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
+	 * { "index": "my-other-index" }
+	 * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
+	 *
+	 * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link MsearchTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template">Documentation * on elastic.co */ @@ -1779,14 +3837,20 @@ public final CompletableFuture> m /** * Get multiple term vectors. *

- * You can specify existing documents by index and ID or provide artificial - * documents in the body of the request. You can specify the index in the - * request body or request URI. The response contains a docs array - * with all the fetched termvectors. Each element has the structure provided by - * the termvectors API. + * Get multiple term vectors with a single request. You can specify existing + * documents by index and ID or provide artificial documents in the body of the + * request. You can specify the index in the request body or request URI. The + * response contains a docs array with all the fetched termvectors. + * Each element has the structure provided by the termvectors API. + *

+ * Artificial documents + *

+ * You can also use mtermvectors to generate term vectors for + * artificial documents provided in the body of the request. The mapping used is + * determined by the specified _index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors">Documentation * on elastic.co */ @@ -1800,17 +3864,23 @@ public CompletableFuture mtermvectors(MtermvectorsRequest /** * Get multiple term vectors. *

- * You can specify existing documents by index and ID or provide artificial - * documents in the body of the request. You can specify the index in the - * request body or request URI. The response contains a docs array - * with all the fetched termvectors. Each element has the structure provided by - * the termvectors API. + * Get multiple term vectors with a single request. You can specify existing + * documents by index and ID or provide artificial documents in the body of the + * request. You can specify the index in the request body or request URI. The + * response contains a docs array with all the fetched termvectors. + * Each element has the structure provided by the termvectors API. + *

+ * Artificial documents + *

+ * You can also use mtermvectors to generate term vectors for + * artificial documents provided in the body of the request. The mapping used is + * determined by the specified _index. * * @param fn * a function that initializes a builder to create the * {@link MtermvectorsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors">Documentation * on elastic.co */ @@ -1822,14 +3892,20 @@ public final CompletableFuture mtermvectors( /** * Get multiple term vectors. *

- * You can specify existing documents by index and ID or provide artificial - * documents in the body of the request. You can specify the index in the - * request body or request URI. The response contains a docs array - * with all the fetched termvectors. Each element has the structure provided by - * the termvectors API. + * Get multiple term vectors with a single request. You can specify existing + * documents by index and ID or provide artificial documents in the body of the + * request. You can specify the index in the request body or request URI. The + * response contains a docs array with all the fetched termvectors. + * Each element has the structure provided by the termvectors API. + *

+ * Artificial documents + *

+ * You can also use mtermvectors to generate term vectors for + * artificial documents provided in the body of the request. The mapping used is + * determined by the specified _index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors">Documentation * on elastic.co */ @@ -1853,11 +3929,53 @@ public CompletableFuture mtermvectors() { * the more recent point in time. *

* A point in time must be opened explicitly before being used in search - * requests. The keep_alive parameter tells Elasticsearch how long - * it should persist. + * requests. + *

+ * A subsequent search request with the pit parameter must not + * specify index, routing, or preference + * values as these parameters are copied from the point in time. + *

+ * Just like regular searches, you can use from and + * size to page through point in time search results, up to the + * first 10,000 hits. If you want to retrieve more hits, use PIT with + * search_after. + *

+ * IMPORTANT: The open point in time request and each subsequent search request + * can return different identifiers; always use the most recently received ID + * for the next search request. + *

 + * When a PIT that contains shard failures is used in a search request, the + * missing shards are always reported in the search response as a + * NoShardAvailableActionException exception. To get rid of these + * exceptions, a new PIT needs to be created so that shards missing from the + * previous PIT can be handled, assuming they become available in the meantime. + *

+ * Keeping point in time alive + *

+ * The keep_alive parameter, which is passed to a open point in + * time request and search request, extends the time to live of the + * corresponding point in time. The value does not need to be long enough to + * process all data — it just needs to be long enough for the next request. + *

+ * Normally, the background merge process optimizes the index by merging + * together smaller segments to create new, bigger segments. Once the smaller + * segments are no longer needed they are deleted. However, open point-in-times + * prevent the old segments from being deleted since they are still in use. + *

+ * TIP: Keeping older segments alive means that more disk space and file handles + * are needed. Ensure that you have configured your nodes to have ample free + * file handles. + *

+ * Additionally, if a segment contains deleted or updated documents then the + * point in time must keep track of whether each document in the segment was + * live at the time of the initial search request. Ensure that your nodes have + * sufficient heap space if you have many open point-in-times on an index that + * is subject to ongoing deletes or updates. Note that a point-in-time doesn't + * prevent its associated indices from being deleted. You can check how many + * point-in-times (that is, search contexts) are open with the nodes stats API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time">Documentation * on elastic.co */ @@ -1881,14 +3999,56 @@ public CompletableFuture openPointInTime(OpenPointInTim * the more recent point in time. *

* A point in time must be opened explicitly before being used in search - * requests. The keep_alive parameter tells Elasticsearch how long - * it should persist. + * requests. + *

+ * A subsequent search request with the pit parameter must not + * specify index, routing, or preference + * values as these parameters are copied from the point in time. + *

+ * Just like regular searches, you can use from and + * size to page through point in time search results, up to the + * first 10,000 hits. If you want to retrieve more hits, use PIT with + * search_after. + *

+ * IMPORTANT: The open point in time request and each subsequent search request + * can return different identifiers; always use the most recently received ID + * for the next search request. + *

 + * When a PIT that contains shard failures is used in a search request, the + * missing shards are always reported in the search response as a + * NoShardAvailableActionException exception. To get rid of these + * exceptions, a new PIT needs to be created so that shards missing from the + * previous PIT can be handled, assuming they become available in the meantime. + *

+ * Keeping point in time alive + *

+ * The keep_alive parameter, which is passed to a open point in + * time request and search request, extends the time to live of the + * corresponding point in time. The value does not need to be long enough to + * process all data — it just needs to be long enough for the next request. + *

+ * Normally, the background merge process optimizes the index by merging + * together smaller segments to create new, bigger segments. Once the smaller + * segments are no longer needed they are deleted. However, open point-in-times + * prevent the old segments from being deleted since they are still in use. + *

+ * TIP: Keeping older segments alive means that more disk space and file handles + * are needed. Ensure that you have configured your nodes to have ample free + * file handles. + *

+ * Additionally, if a segment contains deleted or updated documents then the + * point in time must keep track of whether each document in the segment was + * live at the time of the initial search request. Ensure that your nodes have + * sufficient heap space if you have many open point-in-times on an index that + * is subject to ongoing deletes or updates. Note that a point-in-time doesn't + * prevent its associated indices from being deleted. You can check how many + * point-in-times (that is, search contexts) are open with the nodes stats API. * * @param fn * a function that initializes a builder to create the * {@link OpenPointInTimeRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time">Documentation * on elastic.co */ @@ -1903,7 +4063,7 @@ public final CompletableFuture openPointInTime( * Ping the cluster. Get information about whether the cluster is running. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster">Documentation * on elastic.co */ public CompletableFuture ping() { @@ -1917,7 +4077,7 @@ public CompletableFuture ping() { * script or search template. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script">Documentation * on elastic.co */ @@ -1936,7 +4096,7 @@ public CompletableFuture putScript(PutScriptRequest request) * a function that initializes a builder to create the * {@link PutScriptRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script">Documentation * on elastic.co */ @@ -1954,7 +4114,7 @@ public final CompletableFuture putScript( * queries. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval">Documentation * on elastic.co */ @@ -1968,32 +4128,296 @@ public CompletableFuture rankEval(RankEvalRequest request) { /** * Evaluate ranked search results. *

- * Evaluate the quality of ranked search results over a set of typical search - * queries. + * Evaluate the quality of ranked search results over a set of typical search + * queries. + * + * @param fn + * a function that initializes a builder to create the + * {@link RankEvalRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture rankEval( + Function> fn) { + return rankEval(fn.apply(new RankEvalRequest.Builder()).build()); + } + + // ----- Endpoint: reindex + + /** + * Reindex documents. + *

+ * Copy documents from a source to a destination. You can copy all documents to + * the destination index or reindex a subset of the documents. The source can be + * any existing index, alias, or data stream. The destination must differ from + * the source. For example, you cannot reindex a data stream into itself. + *

+ * IMPORTANT: Reindex requires _source to be enabled for all + * documents in the source. The destination should be configured as wanted + * before calling the reindex API. Reindex does not copy the settings from the + * source or its associated template. Mappings, shard counts, and replicas, for + * example, must be configured ahead of time. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following security privileges: + *

     + * <ul>
  + * <li>The read index privilege for the source data stream, index, + * or alias.</li>
  + * <li>The write index privilege for the destination data stream, + * index, or index alias.</li>
  + * <li>To automatically create a data stream or index with a reindex API + * request, you must have the auto_configure, + * create_index, or manage index privilege for the + * destination data stream, index, or alias.</li>
  + * <li>If reindexing from a remote cluster, the source.remote.user + * must have the monitor cluster privilege and the + * read index privilege for the source data stream, index, or + * alias.</li>
 + * </ul>

+ * If reindexing from a remote cluster, you must explicitly allow the remote + * host in the reindex.remote.whitelist setting. Automatic data + * stream creation requires a matching index template with data stream enabled. + *

+ * The dest element can be configured like the index API to control + * optimistic concurrency control. Omitting version_type or setting + * it to internal causes Elasticsearch to blindly dump documents + * into the destination, overwriting any that happen to have the same ID. + *

+ * Setting version_type to external causes + * Elasticsearch to preserve the version from the source, create + * any documents that are missing, and update any documents that have an older + * version in the destination than they do in the source. + *

+ * Setting op_type to create causes the reindex API to + * create only missing documents in the destination. All existing documents will + * cause a version conflict. + *

+ * IMPORTANT: Because data streams are append-only, any reindex request to a + * destination data stream must have an op_type of + * create. A reindex can only add new documents to a destination + * data stream. It cannot update existing documents in a destination data + * stream. + *

+ * By default, version conflicts abort the reindex process. To continue + * reindexing if there are conflicts, set the conflicts request + * body property to proceed. In this case, the response includes a + * count of the version conflicts that were encountered. Note that the handling + * of other error types is unaffected by the conflicts property. + * Additionally, if you opt to count version conflicts, the operation could + * attempt to reindex more documents from the source than max_docs + * until it has successfully indexed max_docs documents into the + * target or it has gone through every document in the source query. + *

+ * NOTE: The reindex API makes no effort to handle ID collisions. The last + * document written will "win" but the order isn't usually predictable + * so it is not a good idea to rely on this behavior. Instead, make sure that + * IDs are unique by using a script. + *

+ * Running reindex asynchronously + *

+ * If the request contains wait_for_completion=false, Elasticsearch + * performs some preflight checks, launches the request, and returns a task you + * can use to cancel or get the status of the task. Elasticsearch creates a + * record of this task as a document at _tasks/<task_id>. + *

+ * Reindex from multiple sources + *

+ * If you have many sources to reindex it is generally better to reindex them + * one at a time rather than using a glob pattern to pick up multiple sources. + * That way you can resume the process if there are any errors by removing the + * partially completed source and starting over. It also makes parallelizing the + * process fairly simple: split the list of sources to reindex and run each list + * in parallel. + *

+ * For example, you can use a bash script like this: + * + *

+	 * for index in i1 i2 i3 i4 i5; do
+	 *   curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
+	 *     "source": {
+	 *       "index": "'$index'"
+	 *     },
+	 *     "dest": {
+	 *       "index": "'$index'-reindexed"
+	 *     }
+	 *   }'
+	 * done
+	 * 
+	 * 
+ *

+ * Throttling + *

+ * Set requests_per_second to any positive decimal number + * (1.4, 6, 1000, for example) to + * throttle the rate at which reindex issues batches of index operations. + * Requests are throttled by padding each batch with a wait time. To turn off + * throttling, set requests_per_second to -1. + *

+ * The throttling is done by waiting between batches so that the scroll that + * reindex uses internally can be given a timeout that takes into account the + * padding. The padding time is the difference between the batch size divided by + * the requests_per_second and the time spent writing. By default + * the batch size is 1000, so if requests_per_second + * is set to 500: + * + *

+	 * target_time = 1000 / 500 per second = 2 seconds
+	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+	 * 
+	 * 
+ *

+ * Since the batch is issued as a single bulk request, large batch sizes cause + * Elasticsearch to create many requests and then wait for a while before + * starting the next set. This is "bursty" instead of + * "smooth". + *

+ * Slicing + *

+ * Reindex supports sliced scroll to parallelize the reindexing process. This + * parallelization can improve efficiency and provide a convenient way to break + * the request down into smaller parts. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * You can slice a reindex request manually by providing a slice ID and total + * number of slices to each request. You can also let reindex automatically + * parallelize by using sliced scroll to slice on _id. The + * slices parameter specifies the number of slices to use. + *

+ * Adding slices to the reindex request just automates the manual + * process, creating sub-requests which means it has some quirks: + *

    + *
  • You can see these requests in the tasks API. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices + * only contains the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each + * sub-request.
  • + *
  • Due to the nature of slices, each sub-request won't get a + * perfectly even portion of the documents. All documents will be addressed, but + * some slices may be larger than others. Expect larger slices to have a more + * even distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed + * proportionally to each sub-request. Combine that with the previous point + * about distribution being uneven and you should conclude that using + * max_docs with slices might not result in exactly + * max_docs documents being reindexed.
  • + *
  • Each sub-request gets a slightly different snapshot of the source, though + * these are all taken at approximately the same time.
  • + *
+ *

+ * If slicing automatically, setting slices to auto + * will choose a reasonable number for most indices. If slicing manually or + * otherwise tuning automatic slicing, use the following guidelines. + *

+ * Query performance is most efficient when the number of slices is equal to the + * number of shards in the index. If that number is large (for example, + * 500), choose a lower number as too many slices will hurt + * performance. Setting slices higher than the number of shards generally does + * not improve efficiency and adds overhead. + *

+ * Indexing performance scales linearly across available resources with the + * number of slices. + *

+ * Whether query or indexing performance dominates the runtime depends on the + * documents being reindexed and cluster resources. + *

+ * Modify documents during reindexing + *

+ * Like _update_by_query, reindex operations support a script that + * modifies the document. Unlike _update_by_query, the script is + * allowed to modify the document's metadata. + *

+ * Just as in _update_by_query, you can set ctx.op to + * change the operation that is run on the destination. For example, set + * ctx.op to noop if your script decides that the + * document doesn’t have to be indexed in the destination. This "no + * operation" will be reported in the noop counter in the + * response body. Set ctx.op to delete if your script + * decides that the document must be deleted from the destination. The deletion + * will be reported in the deleted counter in the response body. + * Setting ctx.op to anything else will return an error, as will + * setting any other field in ctx. + *

+ * Think of the possibilities! Just be careful; you are able to change: + *

    + *
  • _id
  • + *
  • _index
  • + *
  • _version
  • + *
  • _routing
  • + *
+ *

+ * Setting _version to null or clearing it from the + * ctx map is just like not sending the version in an indexing + * request. It will cause the document to be overwritten in the destination + * regardless of the version on the target or the version type you use in the + * reindex API. + *

+ * Reindex from remote + *

+ * Reindex supports reindexing from a remote Elasticsearch cluster. The + * host parameter must contain a scheme, host, port, and optional + * path. The username and password parameters are + * optional and when they are present the reindex operation will connect to the + * remote Elasticsearch node using basic authentication. Be sure to use HTTPS + * when using basic authentication or the password will be sent in plain text. + * There are a range of settings available to configure the behavior of the + * HTTPS connection. + *

+ * When using Elastic Cloud, it is also possible to authenticate against the + * remote cluster through the use of a valid API key. Remote hosts must be + * explicitly allowed with the reindex.remote.whitelist setting. It + * can be set to a comma delimited list of allowed remote host and port + * combinations. Scheme is ignored; only the host and port are used. For + * example: * - * @param fn - * a function that initializes a builder to create the - * {@link RankEvalRequest} - * @see Documentation - * on elastic.co - */ - - public final CompletableFuture rankEval( - Function> fn) { - return rankEval(fn.apply(new RankEvalRequest.Builder()).build()); - } - - // ----- Endpoint: reindex - - /** - * Reindex documents. Copies documents from a source to a destination. The - * source can be any existing index, alias, or data stream. The destination must - * differ from the source. For example, you cannot reindex a data stream into - * itself. + *

+	 * reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
+	 * 
+	 * 
+ *

+ * The list of allowed hosts must be configured on any nodes that will + * coordinate the reindex. This feature should work with remote clusters of any + * version of Elasticsearch. This should enable you to upgrade from any version + * of Elasticsearch to the current version by reindexing from a cluster of the + * old version. + *

+ * WARNING: Elasticsearch does not support forward compatibility across major + * versions. For example, you cannot reindex from a 7.x cluster into a 6.x + * cluster. + *

+ * To enable queries sent to older versions of Elasticsearch, the + * query parameter is sent directly to the remote host without + * validation or modification. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * Reindexing from a remote server uses an on-heap buffer that defaults to a + * maximum size of 100mb. If the remote index includes very large documents + * you'll need to use a smaller batch size. It is also possible to set the + * socket read timeout on the remote connection with the + * socket_timeout field and the connection timeout with the + * connect_timeout field. Both default to 30 seconds. + *

+ * Configuring SSL parameters + *

+ * Reindex from remote supports configurable SSL settings. These must be + * specified in the elasticsearch.yml file, with the exception of + * the secure settings, which you add in the Elasticsearch keystore. It is not + * possible to configure SSL in the body of the reindex request. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex">Documentation * on elastic.co */ @@ -2005,16 +4429,280 @@ public CompletableFuture reindex(ReindexRequest request) { } /** - * Reindex documents. Copies documents from a source to a destination. The - * source can be any existing index, alias, or data stream. The destination must - * differ from the source. For example, you cannot reindex a data stream into - * itself. + * Reindex documents. + *

+ * Copy documents from a source to a destination. You can copy all documents to + * the destination index or reindex a subset of the documents. The source can be + * any existing index, alias, or data stream. The destination must differ from + * the source. For example, you cannot reindex a data stream into itself. + *

+ * IMPORTANT: Reindex requires _source to be enabled for all + * documents in the source. The destination should be configured as wanted + * before calling the reindex API. Reindex does not copy the settings from the + * source or its associated template. Mappings, shard counts, and replicas, for + * example, must be configured ahead of time. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following security privileges: + *

    + *
  • The read index privilege for the source data stream, index, + * or alias.
  • + *
  • The write index privilege for the destination data stream, + * index, or index alias.
  • + *
  • To automatically create a data stream or index with a reindex API + * request, you must have the auto_configure, + * create_index, or manage index privilege for the + * destination data stream, index, or alias.
  • + *
  • If reindexing from a remote cluster, the source.remote.user + * must have the monitor cluster privilege and the + * read index privilege for the source data stream, index, or + * alias.
  • + *
+ *

+ * If reindexing from a remote cluster, you must explicitly allow the remote + * host in the reindex.remote.whitelist setting. Automatic data + * stream creation requires a matching index template with data stream enabled. + *

+ * The dest element can be configured like the index API to control + * optimistic concurrency control. Omitting version_type or setting + * it to internal causes Elasticsearch to blindly dump documents + * into the destination, overwriting any that happen to have the same ID. + *

+ * Setting version_type to external causes + * Elasticsearch to preserve the version from the source, create + * any documents that are missing, and update any documents that have an older + * version in the destination than they do in the source. + *

+ * Setting op_type to create causes the reindex API to + * create only missing documents in the destination. All existing documents will + * cause a version conflict. + *

+ * IMPORTANT: Because data streams are append-only, any reindex request to a + * destination data stream must have an op_type of + * create. A reindex can only add new documents to a destination + * data stream. It cannot update existing documents in a destination data + * stream. + *

+ * By default, version conflicts abort the reindex process. To continue + * reindexing if there are conflicts, set the conflicts request + * body property to proceed. In this case, the response includes a + * count of the version conflicts that were encountered. Note that the handling + * of other error types is unaffected by the conflicts property. + * Additionally, if you opt to count version conflicts, the operation could + * attempt to reindex more documents from the source than max_docs + * until it has successfully indexed max_docs documents into the + * target or it has gone through every document in the source query. + *

+ * NOTE: The reindex API makes no effort to handle ID collisions. The last + * document written will "win" but the order isn't usually predictable + * so it is not a good idea to rely on this behavior. Instead, make sure that + * IDs are unique by using a script. + *

+ * Running reindex asynchronously + *

+ * If the request contains wait_for_completion=false, Elasticsearch + * performs some preflight checks, launches the request, and returns a task you + * can use to cancel or get the status of the task. Elasticsearch creates a + * record of this task as a document at _tasks/<task_id>. + *

+ * Reindex from multiple sources + *

+ * If you have many sources to reindex it is generally better to reindex them + * one at a time rather than using a glob pattern to pick up multiple sources. + * That way you can resume the process if there are any errors by removing the + * partially completed source and starting over. It also makes parallelizing the + * process fairly simple: split the list of sources to reindex and run each list + * in parallel. + *

+ * For example, you can use a bash script like this: + * + *

+	 * for index in i1 i2 i3 i4 i5; do
+	 *   curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
+	 *     "source": {
+	 *       "index": "'$index'"
+	 *     },
+	 *     "dest": {
+	 *       "index": "'$index'-reindexed"
+	 *     }
+	 *   }'
+	 * done
+	 * 
+	 * 
+ *

+ * Throttling + *

+ * Set requests_per_second to any positive decimal number + * (1.4, 6, 1000, for example) to + * throttle the rate at which reindex issues batches of index operations. + * Requests are throttled by padding each batch with a wait time. To turn off + * throttling, set requests_per_second to -1. + *

+ * The throttling is done by waiting between batches so that the scroll that + * reindex uses internally can be given a timeout that takes into account the + * padding. The padding time is the difference between the batch size divided by + * the requests_per_second and the time spent writing. By default + * the batch size is 1000, so if requests_per_second + * is set to 500: + * + *

+	 * target_time = 1000 / 500 per second = 2 seconds
+	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+	 * 
+	 * 
+ *

+ * Since the batch is issued as a single bulk request, large batch sizes cause + * Elasticsearch to create many requests and then wait for a while before + * starting the next set. This is "bursty" instead of + * "smooth". + *

+ * Slicing + *

+ * Reindex supports sliced scroll to parallelize the reindexing process. This + * parallelization can improve efficiency and provide a convenient way to break + * the request down into smaller parts. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * You can slice a reindex request manually by providing a slice ID and total + * number of slices to each request. You can also let reindex automatically + * parallelize by using sliced scroll to slice on _id. The + * slices parameter specifies the number of slices to use. + *

+ * Adding slices to the reindex request just automates the manual + * process, creating sub-requests which means it has some quirks: + *

    + *
  • You can see these requests in the tasks API. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices + * only contains the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each + * sub-request.
  • + *
  • Due to the nature of slices, each sub-request won't get a + * perfectly even portion of the documents. All documents will be addressed, but + * some slices may be larger than others. Expect larger slices to have a more + * even distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed + * proportionally to each sub-request. Combine that with the previous point + * about distribution being uneven and you should conclude that using + * max_docs with slices might not result in exactly + * max_docs documents being reindexed.
  • + *
  • Each sub-request gets a slightly different snapshot of the source, though + * these are all taken at approximately the same time.
  • + *
+ *

+ * If slicing automatically, setting slices to auto + * will choose a reasonable number for most indices. If slicing manually or + * otherwise tuning automatic slicing, use the following guidelines. + *

+ * Query performance is most efficient when the number of slices is equal to the + * number of shards in the index. If that number is large (for example, + * 500), choose a lower number as too many slices will hurt + * performance. Setting slices higher than the number of shards generally does + * not improve efficiency and adds overhead. + *

+ * Indexing performance scales linearly across available resources with the + * number of slices. + *

+ * Whether query or indexing performance dominates the runtime depends on the + * documents being reindexed and cluster resources. + *

+ * Modify documents during reindexing + *

+ * Like _update_by_query, reindex operations support a script that + * modifies the document. Unlike _update_by_query, the script is + * allowed to modify the document's metadata. + *

+ * Just as in _update_by_query, you can set ctx.op to + * change the operation that is run on the destination. For example, set + * ctx.op to noop if your script decides that the + * document doesn’t have to be indexed in the destination. This "no + * operation" will be reported in the noop counter in the + * response body. Set ctx.op to delete if your script + * decides that the document must be deleted from the destination. The deletion + * will be reported in the deleted counter in the response body. + * Setting ctx.op to anything else will return an error, as will + * setting any other field in ctx. + *

+ * Think of the possibilities! Just be careful; you are able to change: + *

    + *
  • _id
  • + *
  • _index
  • + *
  • _version
  • + *
  • _routing
  • + *
+ *

+ * Setting _version to null or clearing it from the + * ctx map is just like not sending the version in an indexing + * request. It will cause the document to be overwritten in the destination + * regardless of the version on the target or the version type you use in the + * reindex API. + *

+ * Reindex from remote + *

+ * Reindex supports reindexing from a remote Elasticsearch cluster. The + * host parameter must contain a scheme, host, port, and optional + * path. The username and password parameters are + * optional and when they are present the reindex operation will connect to the + * remote Elasticsearch node using basic authentication. Be sure to use HTTPS + * when using basic authentication or the password will be sent in plain text. + * There are a range of settings available to configure the behavior of the + * HTTPS connection. + *

+ * When using Elastic Cloud, it is also possible to authenticate against the + * remote cluster through the use of a valid API key. Remote hosts must be + * explicitly allowed with the reindex.remote.whitelist setting. It + * can be set to a comma delimited list of allowed remote host and port + * combinations. Scheme is ignored; only the host and port are used. For + * example: + * + *

+	 * reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
+	 * 
+	 * 
+ *

+ * The list of allowed hosts must be configured on any nodes that will + * coordinate the reindex. This feature should work with remote clusters of any + * version of Elasticsearch. This should enable you to upgrade from any version + * of Elasticsearch to the current version by reindexing from a cluster of the + * old version. + *

+ * WARNING: Elasticsearch does not support forward compatibility across major + * versions. For example, you cannot reindex from a 7.x cluster into a 6.x + * cluster. + *

+ * To enable queries sent to older versions of Elasticsearch, the + * query parameter is sent directly to the remote host without + * validation or modification. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * Reindexing from a remote server uses an on-heap buffer that defaults to a + * maximum size of 100mb. If the remote index includes very large documents + * you'll need to use a smaller batch size. It is also possible to set the + * socket read timeout on the remote connection with the + * socket_timeout field and the connection timeout with the + * connect_timeout field. Both default to 30 seconds. + *

+ * Configuring SSL parameters + *

+ * Reindex from remote supports configurable SSL settings. These must be + * specified in the elasticsearch.yml file, with the exception of + * the secure settings, which you add in the Elasticsearch keystore. It is not + * possible to configure SSL in the body of the reindex request. * * @param fn * a function that initializes a builder to create the * {@link ReindexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex">Documentation * on elastic.co */ @@ -2029,9 +4717,19 @@ public final CompletableFuture reindex( * Throttle a reindex operation. *

* Change the number of requests per second for a particular reindex operation. + * For example: + * + *

+	 * POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
+	 * 
+	 * 
+ *

+ * Rethrottling that speeds up the query takes effect immediately. Rethrottling + * that slows down the query will take effect after completing the current + * batch. This behavior prevents scroll timeouts. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex">Documentation * on elastic.co */ @@ -2046,12 +4744,22 @@ public CompletableFuture reindexRethrottle(ReindexRet * Throttle a reindex operation. *

* Change the number of requests per second for a particular reindex operation. + * For example: + * + *

+	 * POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
+	 * 
+	 * 
+ *

+ * Rethrottling that speeds up the query takes effect immediately. Rethrottling + * that slows down the query will take effect after completing the current + * batch. This behavior prevents scroll timeouts. * * @param fn * a function that initializes a builder to create the * {@link ReindexRethrottleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex">Documentation * on elastic.co */ @@ -2068,7 +4776,7 @@ public final CompletableFuture reindexRethrottle( * Render a search template as a search request body. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template">Documentation * on elastic.co */ @@ -2088,7 +4796,7 @@ public CompletableFuture renderSearchTemplate(Rend * a function that initializes a builder to create the * {@link RenderSearchTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template">Documentation * on elastic.co */ @@ -2103,7 +4811,7 @@ public final CompletableFuture renderSearchTemplat * Render a search template as a search request body. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template">Documentation * on elastic.co */ @@ -2115,10 +4823,21 @@ public CompletableFuture renderSearchTemplate() { // ----- Endpoint: scripts_painless_execute /** - * Run a script. Runs a script and returns a result. + * Run a script. + *

+ * Runs a script and returns a result. Use this API to build and test scripts, + * such as when defining a script for a runtime field. This API requires very + * few dependencies and is especially useful if you don't have permissions to + * write documents on a cluster. + *

+ * The API uses several contexts, which control how scripts are run, + * what variables are available at runtime, and what the return type is. + *

+ * Each context requires a script, but additional parameters depend on the + * context you're using for that script. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html">Documentation * on elastic.co */ @@ -2134,13 +4853,24 @@ public CompletableFuture> scri } /** - * Run a script. Runs a script and returns a result. + * Run a script. + *

+ * Runs a script and returns a result. Use this API to build and test scripts, + * such as when defining a script for a runtime field. This API requires very + * few dependencies and is especially useful if you don't have permissions to + * write documents on a cluster. + *

+ * The API uses several contexts, which control how scripts are run, + * what variables are available at runtime, and what the return type is. + *

+ * Each context requires a script, but additional parameters depend on the + * context you're using for that script. * * @param fn * a function that initializes a builder to create the * {@link ScriptsPainlessExecuteRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html">Documentation * on elastic.co */ @@ -2151,10 +4881,21 @@ public final CompletableFuture } /** - * Run a script. Runs a script and returns a result. + * Run a script. + *

+ * Runs a script and returns a result. Use this API to build and test scripts, + * such as when defining a script for a runtime field. This API requires very + * few dependencies and is especially useful if you don't have permissions to + * write documents on a cluster. + *

+ * The API uses several contexts, which control how scripts are run, + * what variables are available at runtime, and what the return type is. + *

+ * Each context requires a script, but additional parameters depend on the + * context you're using for that script. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html">Documentation * on elastic.co */ @@ -2170,13 +4911,24 @@ public CompletableFuture> scri } /** - * Run a script. Runs a script and returns a result. + * Run a script. + *

+ * Runs a script and returns a result. Use this API to build and test scripts, + * such as when defining a script for a runtime field. This API requires very + * few dependencies and is especially useful if you don't have permissions to + * write documents on a cluster. + *

+ * The API uses several contexts, which control how scripts are run, + * what variables are available at runtime, and what the return type is. + *

+ * Each context requires a script, but additional parameters depend on the + * context you're using for that script. * * @param fn * a function that initializes a builder to create the * {@link ScriptsPainlessExecuteRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html">Documentation * on elastic.co */ @@ -2214,7 +4966,7 @@ public final CompletableFuture * changes only affect later search and scroll requests. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll">Documentation * on elastic.co */ @@ -2257,7 +5009,7 @@ public CompletableFuture> scroll(ScrollReq * a function that initializes a builder to create the * {@link ScrollRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll">Documentation * on elastic.co */ @@ -2292,7 +5044,7 @@ public final CompletableFuture> scroll( * changes only affect later search and scroll requests. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll">Documentation * on elastic.co */ @@ -2334,7 +5086,7 @@ public CompletableFuture> scroll(ScrollReq * a function that initializes a builder to create the * {@link ScrollRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll">Documentation * on elastic.co */ @@ -2351,9 +5103,32 @@ public final CompletableFuture> scroll( * Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. + *

+ * If the Elasticsearch security features are enabled, you must have the read + * index privilege for the target data stream, index, or alias. For + * cross-cluster search, refer to the documentation about configuring CCS + * privileges. To search a point in time (PIT) for an alias, you must have the + * read index privilege for the alias's data streams or indices. + *

+ * Search slicing + *

+ * When paging through a large number of documents, it can be helpful to split + * the search into multiple slices to consume them independently with the + * slice and pit properties. By default the splitting + * is done first on the shards, then locally on each shard. The local splitting + * partitions the shard into contiguous ranges based on Lucene document IDs. + *

+ * For instance if the number of shards is equal to 2 and you request 4 slices, + * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + * assigned to the second shard. + *

+ * IMPORTANT: The same point-in-time ID should be used for all slices. If + * different PIT IDs are used, slices can overlap and miss documents. This + * situation can occur because the splitting criterion is based on Lucene + * document IDs, which are not stable across changes to the index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search">Documentation * on elastic.co */ @@ -2373,12 +5148,35 @@ public CompletableFuture> search(SearchReq * Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. + *

+ * If the Elasticsearch security features are enabled, you must have the read + * index privilege for the target data stream, index, or alias. For + * cross-cluster search, refer to the documentation about configuring CCS + * privileges. To search a point in time (PIT) for an alias, you must have the + * read index privilege for the alias's data streams or indices. + *

+ * Search slicing + *

+ * When paging through a large number of documents, it can be helpful to split + * the search into multiple slices to consume them independently with the + * slice and pit properties. By default the splitting + * is done first on the shards, then locally on each shard. The local splitting + * partitions the shard into contiguous ranges based on Lucene document IDs. + *

+ * For instance if the number of shards is equal to 2 and you request 4 slices, + * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + * assigned to the second shard. + *

+ * IMPORTANT: The same point-in-time ID should be used for all slices. If + * different PIT IDs are used, slices can overlap and miss documents. This + * situation can occur because the splitting criterion is based on Lucene + * document IDs, which are not stable across changes to the index. * * @param fn * a function that initializes a builder to create the * {@link SearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search">Documentation * on elastic.co */ @@ -2393,9 +5191,32 @@ public final CompletableFuture> search( * Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. + *

+ * If the Elasticsearch security features are enabled, you must have the read + * index privilege for the target data stream, index, or alias. For + * cross-cluster search, refer to the documentation about configuring CCS + * privileges. To search a point in time (PIT) for an alias, you must have the + * read index privilege for the alias's data streams or indices. + *

+ * Search slicing + *

+ * When paging through a large number of documents, it can be helpful to split + * the search into multiple slices to consume them independently with the + * slice and pit properties. By default the splitting + * is done first on the shards, then locally on each shard. The local splitting + * partitions the shard into contiguous ranges based on Lucene document IDs. + *

+ * For instance if the number of shards is equal to 2 and you request 4 slices, + * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + * assigned to the second shard. + *

+ * IMPORTANT: The same point-in-time ID should be used for all slices. If + * different PIT IDs are used, slices can overlap and miss documents. This + * situation can occur because the splitting criterion is based on Lucene + * document IDs, which are not stable across changes to the index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search">Documentation * on elastic.co */ @@ -2414,12 +5235,35 @@ public CompletableFuture> search(SearchReq * Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. + *

+ * If the Elasticsearch security features are enabled, you must have the read + * index privilege for the target data stream, index, or alias. For + * cross-cluster search, refer to the documentation about configuring CCS + * privileges. To search a point in time (PIT) for an alias, you must have the + * read index privilege for the alias's data streams or indices. + *

+ * Search slicing + *

+ * When paging through a large number of documents, it can be helpful to split + * the search into multiple slices to consume them independently with the + * slice and pit properties. By default the splitting + * is done first on the shards, then locally on each shard. The local splitting + * partitions the shard into contiguous ranges based on Lucene document IDs. + *

+ * For instance if the number of shards is equal to 2 and you request 4 slices, + * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + * assigned to the second shard. + *

+ * IMPORTANT: The same point-in-time ID should be used for all slices. If + * different PIT IDs are used, slices can overlap and miss documents. This + * situation can occur because the splitting criterion is based on Lucene + * document IDs, which are not stable across changes to the index. * * @param fn * a function that initializes a builder to create the * {@link SearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search">Documentation * on elastic.co */ @@ -2433,10 +5277,364 @@ public final CompletableFuture> search( /** * Search a vector tile. *

- * Search a vector tile for geospatial values. + * Search a vector tile for geospatial values. Before using this API, you should + * be familiar with the Mapbox vector tile specification. The API returns + * results as a binary Mapbox vector tile. + *

+ * Internally, Elasticsearch translates a vector tile search API request into a + * search containing: + *

    + *
  • A geo_bounding_box query on the <field>. + * The query uses the <zoom>/<x>/<y> tile as a + * bounding box.
  • + *
  • A geotile_grid or geohex_grid aggregation on + * the <field>. The grid_agg parameter + * determines the aggregation type. The aggregation uses the + * <zoom>/<x>/<y> tile as a bounding box.
  • + *
  • Optionally, a geo_bounds aggregation on the + * <field>. The search only includes this aggregation if the + * exact_bounds parameter is true.
  • + *
  • If the optional parameter with_labels is true, + * the internal search will include a dynamic runtime field that calls the + * getLabelPosition function of the geometry doc value. This + * enables the generation of new point features containing suggested geometry + * labels, so that, for example, multi-polygons will have only one label.
  • + *
+ *

+ * For example, Elasticsearch may translate a vector tile search API request + * with a grid_agg argument of geotile and an + * exact_bounds argument of true into the following + * search + * + *

+	 * GET my-index/_search
+	 * {
+	 *   "size": 10000,
+	 *   "query": {
+	 *     "geo_bounding_box": {
+	 *       "my-geo-field": {
+	 *         "top_left": {
+	 *           "lat": -40.979898069620134,
+	 *           "lon": -45
+	 *         },
+	 *         "bottom_right": {
+	 *           "lat": -66.51326044311186,
+	 *           "lon": 0
+	 *         }
+	 *       }
+	 *     }
+	 *   },
+	 *   "aggregations": {
+	 *     "grid": {
+	 *       "geotile_grid": {
+	 *         "field": "my-geo-field",
+	 *         "precision": 11,
+	 *         "size": 65536,
+	 *         "bounds": {
+	 *           "top_left": {
+	 *             "lat": -40.979898069620134,
+	 *             "lon": -45
+	 *           },
+	 *           "bottom_right": {
+	 *             "lat": -66.51326044311186,
+	 *             "lon": 0
+	 *           }
+	 *         }
+	 *       }
+	 *     },
+	 *     "bounds": {
+	 *       "geo_bounds": {
+	 *         "field": "my-geo-field",
+	 *         "wrap_longitude": false
+	 *       }
+	 *     }
+	 *   }
+	 * }
+	 * 
+	 * 
+ *

+ * The API returns results as a binary Mapbox vector tile. Mapbox vector tiles + * are encoded as Google Protobufs (PBF). By default, the tile contains three + * layers: + *

    + *
  • A hits layer containing a feature for each + * <field> value matching the geo_bounding_box + * query.
  • + *
  • An aggs layer containing a feature for each cell of the + * geotile_grid or geohex_grid. The layer only + * contains features for cells with matching data.
  • + *
  • A meta layer containing: + *
      + *
    • A feature containing a bounding box. By default, this is the bounding box + * of the tile.
    • + *
    • Value ranges for any sub-aggregations on the geotile_grid or + * geohex_grid.
    • + *
    • Metadata for the search.
    • + *
    + *
  • + *
+ *

+ * The API only returns features that can display at its zoom level. For + * example, if a polygon feature has no area at its zoom level, the API omits + * it. The API returns errors as UTF-8 encoded JSON. + *

+ * IMPORTANT: You can specify several options for this API as either a query + * parameter or request body parameter. If you specify both parameters, the + * query parameter takes precedence. + *

+ * Grid precision for geotile + *

+ * For a grid_agg of geotile, you can use cells in the + * aggs layer as tiles for lower zoom levels. + * grid_precision represents the additional zoom levels available + * through these cells. The final precision is computed as follows: + * <zoom> + grid_precision. For example, if + * <zoom> is 7 and grid_precision is 8, then the + * geotile_grid aggregation will use a precision of 15. The maximum + * final precision is 29. The grid_precision also determines the + * number of cells for the grid as follows: + * (2^grid_precision) x (2^grid_precision). For example, a value of + * 8 divides the tile into a grid of 256 x 256 cells. The aggs + * layer only contains features for cells with matching data. + *

+ * Grid precision for geohex + *

+ * For a grid_agg of geohex, Elasticsearch uses + * <zoom> and grid_precision to calculate a + * final precision as follows: <zoom> + grid_precision. + *

+ * This precision determines the H3 resolution of the hexagonal cells produced + * by the geohex aggregation. The following table maps the H3 + * resolution for each precision. For example, if <zoom> is 3 + * and grid_precision is 3, the precision is 6. At a precision of + * 6, hexagonal cells have an H3 resolution of 2. If <zoom> + * is 3 and grid_precision is 4, the precision is 7. At a precision + * of 7, hexagonal cells have an H3 resolution of 3. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
PrecisionUnique tile binsH3 resolutionUnique hex binsRatio
14012230.5
21601227.625
364184213.15625
425618423.2890625
51024258825.744140625
64096258821.436035156
7163843411622.512329102
8655363411620.6280822754
926214442881221.099098206
10104857642881220.2747745514
114194304520168420.4808526039
12167772166141178820.8414913416
13671088646141178820.2103728354
142684354567988251620.3681524172
15107374182486917761220.644266719
16429496729686917761220.1610666797
1717179869184948424328420.2818666889
186871947673610338970298820.4932667053
19274877906944112372792091620.8632167343
201099511627776112372792091620.2158041836
2143980465111041216609544641220.3776573213
221759218604441613116266812488420.6609003122
237036874417766413116266812488420.165225078
2428147497671065614813867687418820.2891438866
251125899906842620155697073811931620.5060018015
264503599627370500155697073811931620.1265004504
2718014398509482000155697073811931620.03162511259
2872057594037927900155697073811931620.007906278149
29288230376151712000155697073811931620.001976569537
+ *

+ * Hexagonal cells don't align perfectly on a vector tile. Some cells may + * intersect more than one vector tile. To compute the H3 resolution for each + * precision, Elasticsearch compares the average density of hexagonal bins at + * each resolution with the average density of tile bins at each zoom level. + * Elasticsearch uses the H3 resolution that is closest to the corresponding + * geotile density. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt">Documentation * on elastic.co */ @@ -2450,13 +5648,367 @@ public CompletableFuture searchMvt(SearchMvtRequest request) { /** * Search a vector tile. *

- * Search a vector tile for geospatial values. + * Search a vector tile for geospatial values. Before using this API, you should + * be familiar with the Mapbox vector tile specification. The API returns + * results as a binary Mapbox vector tile. + *

+ * Internally, Elasticsearch translates a vector tile search API request into a + * search containing: + *

    + *
  • A geo_bounding_box query on the <field>. + * The query uses the <zoom>/<x>/<y> tile as a + * bounding box.
  • + *
  • A geotile_grid or geohex_grid aggregation on + * the <field>. The grid_agg parameter + * determines the aggregation type. The aggregation uses the + * <zoom>/<x>/<y> tile as a bounding box.
  • + *
  • Optionally, a geo_bounds aggregation on the + * <field>. The search only includes this aggregation if the + * exact_bounds parameter is true.
  • + *
  • If the optional parameter with_labels is true, + * the internal search will include a dynamic runtime field that calls the + * getLabelPosition function of the geometry doc value. This + * enables the generation of new point features containing suggested geometry + * labels, so that, for example, multi-polygons will have only one label.
  • + *
+ *

+ * For example, Elasticsearch may translate a vector tile search API request + * with a grid_agg argument of geotile and an + * exact_bounds argument of true into the following + * search + * + *

+	 * GET my-index/_search
+	 * {
+	 *   "size": 10000,
+	 *   "query": {
+	 *     "geo_bounding_box": {
+	 *       "my-geo-field": {
+	 *         "top_left": {
+	 *           "lat": -40.979898069620134,
+	 *           "lon": -45
+	 *         },
+	 *         "bottom_right": {
+	 *           "lat": -66.51326044311186,
+	 *           "lon": 0
+	 *         }
+	 *       }
+	 *     }
+	 *   },
+	 *   "aggregations": {
+	 *     "grid": {
+	 *       "geotile_grid": {
+	 *         "field": "my-geo-field",
+	 *         "precision": 11,
+	 *         "size": 65536,
+	 *         "bounds": {
+	 *           "top_left": {
+	 *             "lat": -40.979898069620134,
+	 *             "lon": -45
+	 *           },
+	 *           "bottom_right": {
+	 *             "lat": -66.51326044311186,
+	 *             "lon": 0
+	 *           }
+	 *         }
+	 *       }
+	 *     },
+	 *     "bounds": {
+	 *       "geo_bounds": {
+	 *         "field": "my-geo-field",
+	 *         "wrap_longitude": false
+	 *       }
+	 *     }
+	 *   }
+	 * }
+	 * 
+	 * 
+ *

+ * The API returns results as a binary Mapbox vector tile. Mapbox vector tiles + * are encoded as Google Protobufs (PBF). By default, the tile contains three + * layers: + *

    + *
  • A hits layer containing a feature for each + * <field> value matching the geo_bounding_box + * query.
  • + *
  • An aggs layer containing a feature for each cell of the + * geotile_grid or geohex_grid. The layer only + * contains features for cells with matching data.
  • + *
  • A meta layer containing: + *
      + *
    • A feature containing a bounding box. By default, this is the bounding box + * of the tile.
    • + *
    • Value ranges for any sub-aggregations on the geotile_grid or + * geohex_grid.
    • + *
    • Metadata for the search.
    • + *
    + *
  • + *
+ *

+ * The API only returns features that can display at its zoom level. For + * example, if a polygon feature has no area at its zoom level, the API omits + * it. The API returns errors as UTF-8 encoded JSON. + *

+ * IMPORTANT: You can specify several options for this API as either a query + * parameter or request body parameter. If you specify both parameters, the + * query parameter takes precedence. + *

+ * Grid precision for geotile + *

+ * For a grid_agg of geotile, you can use cells in the + * aggs layer as tiles for lower zoom levels. + * grid_precision represents the additional zoom levels available + * through these cells. The final precision is computed as follows: + * <zoom> + grid_precision. For example, if + * <zoom> is 7 and grid_precision is 8, then the + * geotile_grid aggregation will use a precision of 15. The maximum + * final precision is 29. The grid_precision also determines the + * number of cells for the grid as follows: + * (2^grid_precision) x (2^grid_precision). For example, a value of + * 8 divides the tile into a grid of 256 x 256 cells. The aggs + * layer only contains features for cells with matching data. + *

+ * Grid precision for geohex + *

+ * For a grid_agg of geohex, Elasticsearch uses + * <zoom> and grid_precision to calculate a + * final precision as follows: <zoom> + grid_precision. + *

+ * This precision determines the H3 resolution of the hexagonal cells produced + * by the geohex aggregation. The following table maps the H3 + * resolution for each precision. For example, if <zoom> is 3 + * and grid_precision is 3, the precision is 6. At a precision of + * 6, hexagonal cells have an H3 resolution of 2. If <zoom> + * is 3 and grid_precision is 4, the precision is 7. At a precision + * of 7, hexagonal cells have an H3 resolution of 3. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
PrecisionUnique tile binsH3 resolutionUnique hex binsRatio
14012230.5
21601227.625
364184213.15625
425618423.2890625
51024258825.744140625
64096258821.436035156
7163843411622.512329102
8655363411620.6280822754
926214442881221.099098206
10104857642881220.2747745514
114194304520168420.4808526039
12167772166141178820.8414913416
13671088646141178820.2103728354
142684354567988251620.3681524172
15107374182486917761220.644266719
16429496729686917761220.1610666797
1717179869184948424328420.2818666889
186871947673610338970298820.4932667053
19274877906944112372792091620.8632167343
201099511627776112372792091620.2158041836
2143980465111041216609544641220.3776573213
221759218604441613116266812488420.6609003122
237036874417766413116266812488420.165225078
2428147497671065614813867687418820.2891438866
251125899906842620155697073811931620.5060018015
264503599627370500155697073811931620.1265004504
2718014398509482000155697073811931620.03162511259
2872057594037927900155697073811931620.007906278149
29288230376151712000155697073811931620.001976569537
+ *

+ * Hexagonal cells don't align perfectly on a vector tile. Some cells may + * intersect more than one vector tile. To compute the H3 resolution for each + * precision, Elasticsearch compares the average density of hexagonal bins at + * each resolution with the average density of tile bins at each zoom level. + * Elasticsearch uses the H3 resolution that is closest to the corresponding + * geotile density. * * @param fn * a function that initializes a builder to create the * {@link SearchMvtRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt">Documentation * on elastic.co */ @@ -2473,10 +6025,14 @@ public final CompletableFuture searchMvt( * Get the indices and shards that a search request would be run against. This * information can be useful for working out issues or planning optimizations * with routing and shard preferences. When filtered aliases are used, the - * filter is returned as part of the indices section. + * filter is returned as part of the indices section. + *

+ * If the Elasticsearch security features are enabled, you must have the + * view_index_metadata or manage index privilege for + * the target data stream, index, or alias. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards">Documentation * on elastic.co */ @@ -2493,13 +6049,17 @@ public CompletableFuture searchShards(SearchShardsRequest * Get the indices and shards that a search request would be run against. This * information can be useful for working out issues or planning optimizations * with routing and shard preferences. When filtered aliases are used, the - * filter is returned as part of the indices section. + * filter is returned as part of the indices section. + *

+ * If the Elasticsearch security features are enabled, you must have the + * view_index_metadata or manage index privilege for + * the target data stream, index, or alias. * * @param fn * a function that initializes a builder to create the * {@link SearchShardsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards">Documentation * on elastic.co */ @@ -2514,10 +6074,14 @@ public final CompletableFuture searchShards( * Get the indices and shards that a search request would be run against. This * information can be useful for working out issues or planning optimizations * with routing and shard preferences. When filtered aliases are used, the - * filter is returned as part of the indices section. + * filter is returned as part of the indices section. + *

+ * If the Elasticsearch security features are enabled, you must have the + * view_index_metadata or manage index privilege for + * the target data stream, index, or alias. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards">Documentation * on elastic.co */ @@ -2532,7 +6096,7 @@ public CompletableFuture searchShards() { * Run a search with a search template. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template">Documentation * on elastic.co */ @@ -2554,7 +6118,7 @@ public CompletableFuture> searchTe * a function that initializes a builder to create the * {@link SearchTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template">Documentation * on elastic.co */ @@ -2568,7 +6132,7 @@ public final CompletableFuture> se * Run a search with a search template. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template">Documentation * on elastic.co */ @@ -2590,7 +6154,7 @@ public CompletableFuture> searchTe * a function that initializes a builder to create the * {@link SearchTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template">Documentation * on elastic.co */ @@ -2604,21 +6168,18 @@ public final CompletableFuture> se /** * Get terms in an index. *

- * Discover terms that match a partial string in an index. This "terms - * enum" API is designed for low-latency look-ups used in auto-complete - * scenarios. + * Discover terms that match a partial string in an index. This API is designed + * for low-latency look-ups used in auto-complete scenarios.

*

- * If the complete property in the response is false, the returned - * terms set may be incomplete and should be treated as approximate. This can - * occur due to a few reasons, such as a request timeout or a node error. - *

- * NOTE: The terms enum API may return terms from deleted documents. Deleted + * info The terms enum API may return terms from deleted documents. Deleted * documents are initially only marked as deleted. It is not until their * segments are merged that documents are actually deleted. Until that happens, * the terms enum API will return terms from these documents. - * + *

+ *
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum">Documentation * on elastic.co */ @@ -2632,24 +6193,21 @@ public CompletableFuture termsEnum(TermsEnumRequest request) /** * Get terms in an index. *

- * Discover terms that match a partial string in an index. This "terms - * enum" API is designed for low-latency look-ups used in auto-complete - * scenarios. + * Discover terms that match a partial string in an index. This API is designed + * for low-latency look-ups used in auto-complete scenarios.

*

- * If the complete property in the response is false, the returned - * terms set may be incomplete and should be treated as approximate. This can - * occur due to a few reasons, such as a request timeout or a node error. - *

- * NOTE: The terms enum API may return terms from deleted documents. Deleted + * info The terms enum API may return terms from deleted documents. Deleted * documents are initially only marked as deleted. It is not until their * segments are merged that documents are actually deleted. Until that happens, * the terms enum API will return terms from these documents. - * + *

+ *
+ * * @param fn * a function that initializes a builder to create the * {@link TermsEnumRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum">Documentation * on elastic.co */ @@ -2665,9 +6223,59 @@ public final CompletableFuture termsEnum( *

* Get information and statistics about terms in the fields of a particular * document. + *

+ * You can retrieve term vectors for documents stored in the index or for + * artificial documents passed in the body of the request. You can specify the + * fields you are interested in through the fields parameter or by + * adding the fields to the request body. For example: + * + *

+	 * GET /my-index-000001/_termvectors/1?fields=message
+	 * 
+	 * 
+ *

+ * Fields can be specified using wildcards, similar to the multi match query. + *

+ * Term vectors are real-time by default, not near real-time. This can be + * changed by setting realtime parameter to false. + *

+ * You can request three types of values: term information, term + * statistics, and field statistics. By default, all term + * information and field statistics are returned for all fields but term + * statistics are excluded. + *

+ * Term information + *

    + *
  • term frequency in the field (always returned)
  • + *
  • term positions (positions: true)
  • + *
  • start and end offsets (offsets: true)
  • + *
  • term payloads (payloads: true), as base64 encoded bytes
  • + *
+ *

+ * If the requested information wasn't stored in the index, it will be computed + * on the fly if possible. Additionally, term vectors could be computed for + * documents not even existing in the index, but instead provided by the user. + *

+ *

+ * warn Start and end offsets assume UTF-16 encoding is being used. If you want + * to use these offsets in order to get the original text that produced this + * token, you should make sure that the string you are taking a sub-string of is + * also encoded using UTF-16. + *

+ *
+ *

+ * Behaviour + *

+ * The term and field statistics are not accurate. Deleted documents are not + * taken into account. The information is only retrieved for the shard the + * requested document resides in. The term and field statistics are therefore + * only useful as relative measures whereas the absolute numbers have no meaning + * in this context. By default, when requesting term vectors of artificial + * documents, a shard to get the statistics from is randomly selected. Use + * routing only to hit a particular shard. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors">Documentation * on elastic.co */ @@ -2683,12 +6291,62 @@ public CompletableFuture termvectors(Termvector *

* Get information and statistics about terms in the fields of a particular * document. + *

+ * You can retrieve term vectors for documents stored in the index or for + * artificial documents passed in the body of the request. You can specify the + * fields you are interested in through the fields parameter or by + * adding the fields to the request body. For example: + * + *

+	 * GET /my-index-000001/_termvectors/1?fields=message
+	 * 
+	 * 
+ *

+ * Fields can be specified using wildcards, similar to the multi match query. + *

+ * Term vectors are real-time by default, not near real-time. This can be + * changed by setting realtime parameter to false. + *

+ * You can request three types of values: term information, term + * statistics, and field statistics. By default, all term + * information and field statistics are returned for all fields but term + * statistics are excluded. + *

+ * Term information + *

    + *
  • term frequency in the field (always returned)
  • + *
  • term positions (positions: true)
  • + *
  • start and end offsets (offsets: true)
  • + *
  • term payloads (payloads: true), as base64 encoded bytes
  • + *
+ *

+ * If the requested information wasn't stored in the index, it will be computed + * on the fly if possible. Additionally, term vectors could be computed for + * documents not even existing in the index, but instead provided by the user. + *

+ *

+ * warn Start and end offsets assume UTF-16 encoding is being used. If you want + * to use these offsets in order to get the original text that produced this + * token, you should make sure that the string you are taking a sub-string of is + * also encoded using UTF-16. + *

+ *
+ *

+ * Behaviour + *

+ * The term and field statistics are not accurate. Deleted documents are not + * taken into account. The information is only retrieved for the shard the + * requested document resides in. The term and field statistics are therefore + * only useful as relative measures whereas the absolute numbers have no meaning + * in this context. By default, when requesting term vectors of artificial + * documents, a shard to get the statistics from is randomly selected. Use + * routing only to hit a particular shard. * * @param fn * a function that initializes a builder to create the * {@link TermvectorsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors">Documentation * on elastic.co */ @@ -2700,11 +6358,36 @@ public final CompletableFuture termvectors( // ----- Endpoint: update /** - * Update a document. Updates a document by running a script or passing a - * partial document. + * Update a document. + *

+ * Update a document by running a script or passing a partial document. + *

+ * If the Elasticsearch security features are enabled, you must have the + * index or write index privilege for the target index + * or index alias. + *

+ * The script can update, delete, or skip modifying the document. The API also + * supports passing a partial document, which is merged into the existing + * document. To fully replace an existing document, use the index API. This + * operation: + *

    + *
  • Gets the document (collocated with the shard) from the index.
  • + *
  • Runs the specified script.
  • + *
  • Indexes the result.
  • + *
+ *

+ * The document must still be reindexed, but using this API removes some network + * roundtrips and reduces chances of version conflicts between the GET and the + * index operation. + *

+ * The _source field must be enabled to use this API. In addition + * to _source, you can access the following variables through the + * ctx map: _index, _type, + * _id, _version, _routing, and + * _now (the current timestamp). * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update">Documentation * on elastic.co */ @@ -2719,14 +6402,39 @@ public CompletableFuture } /** - * Update a document. Updates a document by running a script or passing a - * partial document. + * Update a document. + *

+ * Update a document by running a script or passing a partial document. + *

+ * If the Elasticsearch security features are enabled, you must have the + * index or write index privilege for the target index + * or index alias. + *

+ * The script can update, delete, or skip modifying the document. The API also + * supports passing a partial document, which is merged into the existing + * document. To fully replace an existing document, use the index API. This + * operation: + *

    + *
  • Gets the document (collocated with the shard) from the index.
  • + *
  • Runs the specified script.
  • + *
  • Indexes the result.
  • + *
+ *

+ * The document must still be reindexed, but using this API removes some network + * roundtrips and reduces chances of version conflicts between the GET and the + * index operation. + *

+ * The _source field must be enabled to use this API. In addition + * to _source, you can access the following variables through the + * ctx map: _index, _type, + * _id, _version, _routing, and + * _now (the current timestamp). * * @param fn * a function that initializes a builder to create the * {@link UpdateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update">Documentation * on elastic.co */ @@ -2737,11 +6445,36 @@ public final CompletableFuture + * Update a document by running a script or passing a partial document. + *

+ * If the Elasticsearch security features are enabled, you must have the + * index or write index privilege for the target index + * or index alias. + *

+ * The script can update, delete, or skip modifying the document. The API also + * supports passing a partial document, which is merged into the existing + * document. To fully replace an existing document, use the index API. This + * operation: + *

    + *
  • Gets the document (collocated with the shard) from the index.
  • + *
  • Runs the specified script.
  • + *
  • Indexes the result.
  • + *
+ *

+ * The document must still be reindexed, but using this API removes some network + * roundtrips and reduces chances of version conflicts between the GET and the + * index operation. + *

+ * The _source field must be enabled to use this API. In addition + * to _source, you can access the following variables through the + * ctx map: _index, _type, + * _id, _version, _routing, and + * _now (the current timestamp). * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update">Documentation * on elastic.co */ @@ -2756,14 +6489,39 @@ public CompletableFuture } /** - * Update a document. Updates a document by running a script or passing a - * partial document. + * Update a document. + *

+ * Update a document by running a script or passing a partial document. + *

+ * If the Elasticsearch security features are enabled, you must have the + * index or write index privilege for the target index + * or index alias. + *

+ * The script can update, delete, or skip modifying the document. The API also + * supports passing a partial document, which is merged into the existing + * document. To fully replace an existing document, use the index API. This + * operation: + *

    + *
  • Gets the document (collocated with the shard) from the index.
  • + *
  • Runs the specified script.
  • + *
  • Indexes the result.
  • + *
+ *

+ * The document must still be reindexed, but using this API removes some network + * roundtrips and reduces chances of version conflicts between the GET and the + * index operation. + *

+ * The _source field must be enabled to use this API. In addition + * to _source, you can access the following variables through the + * ctx map: _index, _type, + * _id, _version, _routing, and + * _now (the current timestamp). * * @param fn * a function that initializes a builder to create the * {@link UpdateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update">Documentation * on elastic.co */ @@ -2780,9 +6538,142 @@ public final CompletableFuture + * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or alias: + *

    + *
  • read
  • + *
  • index or write
  • + *
+ *

+ * You can specify the query criteria in the request URI or the request body + * using the same syntax as the search API. + *

+ * When you submit an update by query request, Elasticsearch gets a snapshot of + * the data stream or index when it begins processing the request and updates + * matching documents using internal versioning. When the versions match, the + * document is updated and the version number is incremented. If a document + * changes between the time that the snapshot is taken and the update operation + * is processed, it results in a version conflict and the operation fails. You + * can opt to count version conflicts instead of halting and returning by + * setting conflicts to proceed. Note that if you opt + * to count version conflicts, the operation could attempt to update more + * documents from the source than max_docs until it has + * successfully updated max_docs documents or it has gone through + * every document in the source query. + *

+ * NOTE: Documents with a version equal to 0 cannot be updated using update by + * query because internal versioning does not support 0 as a valid version + * number. + *

+ * While processing an update by query request, Elasticsearch performs multiple + * search requests sequentially to find all of the matching documents. A bulk + * update request is performed for each batch of matching documents. Any query + * or update failures cause the update by query request to fail and the failures + * are shown in the response. Any update requests that completed successfully + * still stick, they are not rolled back. + *

+ * Throttling update requests + *

+ * To control the rate at which update by query issues batches of update + * operations, you can set requests_per_second to any positive + * decimal number. This pads each batch with a wait time to throttle the rate. + * Set requests_per_second to -1 to turn off + * throttling. + *

+ * Throttling uses a wait time between batches so that the internal scroll + * requests can be given a timeout that takes the request padding into account. + * The padding time is the difference between the batch size divided by the + * requests_per_second and the time spent writing. By default the + * batch size is 1000, so if requests_per_second is set to + * 500: + * + *

+	 * target_time = 1000 / 500 per second = 2 seconds
+	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+	 * 
+	 * 
+ *

+ * Since the batch is issued as a single _bulk request, large batch sizes cause + * Elasticsearch to create many requests and wait before starting the next set. + * This is "bursty" instead of "smooth". + *

+ * Slicing + *

+ * Update by query supports sliced scroll to parallelize the update process. + * This can improve efficiency and provide a convenient way to break the request + * down into smaller parts. + *

+ * Setting slices to auto chooses a reasonable number + * for most data streams and indices. This setting will use one slice per shard, + * up to a certain limit. If there are multiple source data streams or indices, + * it will choose the number of slices based on the index or backing index with + * the smallest number of shards. + *

+ * Adding slices to _update_by_query just automates + * the manual process of creating sub-requests, which means it has some quirks: + *

    + *
  • You can see these requests in the tasks APIs. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices + * only contains the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each sub-request.
  • + *
  • Due to the nature of slices each sub-request won't get a perfectly even + * portion of the documents. All documents will be addressed, but some slices + * may be larger than others. Expect larger slices to have a more even + * distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed proportionally + * to each sub-request. Combine that with the point above about distribution + * being uneven and you should conclude that using max_docs with + * slices might not result in exactly max_docs + * documents being updated.
  • + *
  • Each sub-request gets a slightly different snapshot of the source data + * stream or index though these are all taken at approximately the same + * time.
  • + *
+ *

+ * If you're slicing manually or otherwise tuning automatic slicing, keep in + * mind that: + *

    + *
  • Query performance is most efficient when the number of slices is equal to + * the number of shards in the index or backing index. If that number is large + * (for example, 500), choose a lower number as too many slices hurts + * performance. Setting slices higher than the number of shards generally does + * not improve efficiency and adds overhead.
  • + *
  • Update performance scales linearly across available resources with the + * number of slices.
  • + *
+ *

+ * Whether query or update performance dominates the runtime depends on the + * documents being reindexed and cluster resources. + *

+ * Update the document source + *

+ * Update by query supports scripts to update the document source. As with the + * update API, you can set ctx.op to change the operation that is + * performed. + *

+ * Set ctx.op = "noop" if your script decides that it + * doesn't have to make any changes. The update by query operation skips + * updating the document and increments the noop counter. + *

+ * Set ctx.op = "delete" if your script decides that the + * document should be deleted. The update by query operation deletes the + * document and increments the deleted counter. + *

+ * Update by query supports only index, noop, and + * delete. Setting ctx.op to anything else is an + * error. Setting any other field in ctx is an error. This API + * enables you to only modify the source of matching documents; you cannot move + * them. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query">Documentation * on elastic.co */ @@ -2798,12 +6689,145 @@ public CompletableFuture updateByQuery(UpdateByQueryReque * query is specified, performs an update on every document in the data stream * or index without modifying the source, which is useful for picking up mapping * changes. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or alias: + *

    + *
  • read
  • + *
  • index or write
  • + *
+ *

+ * You can specify the query criteria in the request URI or the request body + * using the same syntax as the search API. + *

+ * When you submit an update by query request, Elasticsearch gets a snapshot of + * the data stream or index when it begins processing the request and updates + * matching documents using internal versioning. When the versions match, the + * document is updated and the version number is incremented. If a document + * changes between the time that the snapshot is taken and the update operation + * is processed, it results in a version conflict and the operation fails. You + * can opt to count version conflicts instead of halting and returning by + * setting conflicts to proceed. Note that if you opt + * to count version conflicts, the operation could attempt to update more + * documents from the source than max_docs until it has + * successfully updated max_docs documents or it has gone through + * every document in the source query. + *

+ * NOTE: Documents with a version equal to 0 cannot be updated using update by + * query because internal versioning does not support 0 as a valid version + * number. + *

+ * While processing an update by query request, Elasticsearch performs multiple + * search requests sequentially to find all of the matching documents. A bulk + * update request is performed for each batch of matching documents. Any query + * or update failures cause the update by query request to fail and the failures + * are shown in the response. Any update requests that completed successfully + * still stick, they are not rolled back. + *

+ * Throttling update requests + *

+ * To control the rate at which update by query issues batches of update + * operations, you can set requests_per_second to any positive + * decimal number. This pads each batch with a wait time to throttle the rate. + * Set requests_per_second to -1 to turn off + * throttling. + *

+ * Throttling uses a wait time between batches so that the internal scroll + * requests can be given a timeout that takes the request padding into account. + * The padding time is the difference between the batch size divided by the + * requests_per_second and the time spent writing. By default the + * batch size is 1000, so if requests_per_second is set to + * 500: + * + *

+	 * target_time = 1000 / 500 per second = 2 seconds
+	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+	 * 
+	 * 
+ *

+ * Since the batch is issued as a single _bulk request, large batch sizes cause + * Elasticsearch to create many requests and wait before starting the next set. + * This is "bursty" instead of "smooth". + *

+ * Slicing + *

+ * Update by query supports sliced scroll to parallelize the update process. + * This can improve efficiency and provide a convenient way to break the request + * down into smaller parts. + *

+ * Setting slices to auto chooses a reasonable number + * for most data streams and indices. This setting will use one slice per shard, + * up to a certain limit. If there are multiple source data streams or indices, + * it will choose the number of slices based on the index or backing index with + * the smallest number of shards. + *

+ * Adding slices to _update_by_query just automates + * the manual process of creating sub-requests, which means it has some quirks: + *

    + *
  • You can see these requests in the tasks APIs. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices + * only contains the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each sub-request.
  • + *
  • Due to the nature of slices each sub-request won't get a perfectly even + * portion of the documents. All documents will be addressed, but some slices + * may be larger than others. Expect larger slices to have a more even + * distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed proportionally + * to each sub-request. Combine that with the point above about distribution + * being uneven and you should conclude that using max_docs with + * slices might not result in exactly max_docs + * documents being updated.
  • + *
  • Each sub-request gets a slightly different snapshot of the source data + * stream or index though these are all taken at approximately the same + * time.
  • + *
+ *

+ * If you're slicing manually or otherwise tuning automatic slicing, keep in + * mind that: + *

    + *
  • Query performance is most efficient when the number of slices is equal to + * the number of shards in the index or backing index. If that number is large + * (for example, 500), choose a lower number as too many slices hurts + * performance. Setting slices higher than the number of shards generally does + * not improve efficiency and adds overhead.
  • + *
  • Update performance scales linearly across available resources with the + * number of slices.
  • + *
+ *

+ * Whether query or update performance dominates the runtime depends on the + * documents being reindexed and cluster resources. + *

+ * Update the document source + *

+ * Update by query supports scripts to update the document source. As with the + * update API, you can set ctx.op to change the operation that is + * performed. + *

+ * Set ctx.op = "noop" if your script decides that it + * doesn't have to make any changes. The update by query operation skips + * updating the document and increments the noop counter. + *

+ * Set ctx.op = "delete" if your script decides that the + * document should be deleted. The update by query operation deletes the + * document and increments the deleted counter. + *

+ * Update by query supports only index, noop, and + * delete. Setting ctx.op to anything else is an + * error. Setting any other field in ctx is an error. This API + * enables you to only modify the source of matching documents; you cannot move + * them. * * @param fn * a function that initializes a builder to create the * {@link UpdateByQueryRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query">Documentation * on elastic.co */ @@ -2823,7 +6847,7 @@ public final CompletableFuture updateByQuery( * current batch to prevent scroll timeouts. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle">Documentation * on elastic.co */ @@ -2847,7 +6871,7 @@ public CompletableFuture updateByQueryRethrottl * a function that initializes a builder to create the * {@link UpdateByQueryRethrottleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java index 7796a7329..41561261c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java @@ -358,12 +358,169 @@ public ElasticsearchXpackClient xpack() { // ----- Endpoint: bulk /** - * Bulk index or delete documents. Performs multiple indexing or delete - * operations in a single API call. This reduces overhead and can greatly - * increase indexing speed. + * Bulk index or delete documents. Perform multiple index, + * create, delete, and update actions in + * a single request. This reduces overhead and can greatly increase indexing + * speed. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To use the create action, you must have the + * create_doc, create, index, or + * write index privilege. Data streams support only the + * create action.
  • + *
  • To use the index action, you must have the + * create, index, or write index + * privilege.
  • + *
  • To use the delete action, you must have the + * delete or write index privilege.
  • + *
  • To use the update action, you must have the + * index or write index privilege.
  • + *
  • To automatically create a data stream or index with a bulk API request, + * you must have the auto_configure, create_index, or + * manage index privilege.
  • + *
  • To make the result of a bulk operation visible to search using the + * refresh parameter, you must have the maintenance or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * The actions are specified in the request body using a newline delimited JSON + * (NDJSON) structure: + * + *

+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * ....
+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * 
+	 * 
+ *

+ * The index and create actions expect a source on the + * next line and have the same semantics as the op_type parameter + * in the standard index API. A create action fails if a document + * with the same ID already exists in the target. An index action + * adds or replaces a document as necessary. + *

+ * NOTE: Data streams support only the create action. To update or + * delete a document in a data stream, you must target the backing index + * containing the document. + *

+ * An update action expects that the partial doc, upsert, and + * script and its options are specified on the next line. + *

+ * A delete action does not expect a source on the next line and + * has the same semantics as the standard delete API. + *

+ * NOTE: The final line of data must end with a newline character + * (\n). Each newline character may be preceded by a carriage + * return (\r). When sending NDJSON data to the _bulk + * endpoint, use a Content-Type header of + * application/json or application/x-ndjson. Because + * this format uses literal newline characters (\n) as delimiters, + * make sure that the JSON actions and sources are not pretty printed. + *

+ * If you provide a target in the request path, it is used for any actions that + * don't explicitly specify an _index argument. + *

+ * A note on the format: the idea here is to make processing as fast as + * possible. As some of the actions are redirected to other shards on other + * nodes, only action_meta_data is parsed on the receiving node + * side. + *

+ * Client libraries using this protocol should try and strive to do something + * similar on the client side, and reduce buffering as much as possible. + *

+ * There is no "correct" number of actions to perform in a single bulk + * request. Experiment with different settings to find the optimal size for your + * particular workload. Note that Elasticsearch limits the maximum size of a + * HTTP request to 100mb by default so clients must ensure that no request + * exceeds this size. It is not possible to index a single document that exceeds + * the size limit, so you must pre-process any such documents into smaller + * pieces before sending them to Elasticsearch. For instance, split documents + * into pages or chapters before indexing them, or store raw binary data in a + * system outside Elasticsearch and replace the raw data with a link to the + * external system in the documents that you send to Elasticsearch. + *

+ * Client support for bulk requests + *

+ * Some of the officially supported clients provide helpers to assist with bulk + * requests and reindexing: + *

    + *
  • Go: Check out esutil.BulkIndexer
  • + *
  • Perl: Check out Search::Elasticsearch::Client::5_0::Bulk and + * Search::Elasticsearch::Client::5_0::Scroll
  • + *
  • Python: Check out elasticsearch.helpers.*
  • + *
  • JavaScript: Check out client.helpers.*
  • + *
  • .NET: Check out BulkAllObservable
  • + *
  • PHP: Check out bulk indexing.
  • + *
+ *

+ * Submitting bulk requests with cURL + *

+ * If you're providing text file input to curl, you must use the + * --data-binary flag instead of plain -d. The latter + * doesn't preserve newlines. For example: + * + *

+	 * $ cat requests
+	 * { "index" : { "_index" : "test", "_id" : "1" } }
+	 * { "field1" : "value1" }
+	 * $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
+	 * {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+	 * 
+	 * 
+ *

+ * Optimistic concurrency control + *

+ * Each index and delete action within a bulk API call + * may include the if_seq_no and if_primary_term + * parameters in their respective action and meta data lines. The + * if_seq_no and if_primary_term parameters control + * how operations are run, based on the last modification to existing documents. + * See Optimistic concurrency control for more details. + *

+ * Versioning + *

+ * Each bulk item can include the version value using the version + * field. It automatically follows the behavior of the index or delete operation + * based on the _version mapping. It also supports the + * version_type. + *

+ * Routing + *

+ * Each bulk item can include the routing value using the routing + * field. It automatically follows the behavior of the index or delete operation + * based on the _routing mapping. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Wait for active shards + *

+ * When making bulk calls, you can set the wait_for_active_shards + * parameter to require a minimum number of shard copies to be active before + * starting to process the bulk request. + *

+ * Refresh + *

+ * Control when the changes made by this request are visible to search. + *

+ * NOTE: Only the shards that receive the bulk request will be affected by + * refresh. Imagine a _bulk?refresh=wait_for request with three + * documents in it that happen to be routed to different shards in an index with + * five shards. The request will only wait for those three shards to refresh. + * The other two shards that make up the index do not participate in the + * _bulk request at all. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk">Documentation * on elastic.co */ @@ -375,15 +532,172 @@ public BulkResponse bulk(BulkRequest request) throws IOException, ElasticsearchE } /** - * Bulk index or delete documents. Performs multiple indexing or delete - * operations in a single API call. This reduces overhead and can greatly - * increase indexing speed. + * Bulk index or delete documents. Perform multiple index, + * create, delete, and update actions in + * a single request. This reduces overhead and can greatly increase indexing + * speed. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To use the create action, you must have the + * create_doc, create, index, or + * write index privilege. Data streams support only the + * create action.
  • + *
  • To use the index action, you must have the + * create, index, or write index + * privilege.
  • + *
  • To use the delete action, you must have the + * delete or write index privilege.
  • + *
  • To use the update action, you must have the + * index or write index privilege.
  • + *
  • To automatically create a data stream or index with a bulk API request, + * you must have the auto_configure, create_index, or + * manage index privilege.
  • + *
  • To make the result of a bulk operation visible to search using the + * refresh parameter, you must have the maintenance or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * The actions are specified in the request body using a newline delimited JSON + * (NDJSON) structure: + * + *

+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * ....
+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * 
+	 * 
+ *

+ * The index and create actions expect a source on the + * next line and have the same semantics as the op_type parameter + * in the standard index API. A create action fails if a document + * with the same ID already exists in the target. An index action + * adds or replaces a document as necessary. + *

+ * NOTE: Data streams support only the create action. To update or + * delete a document in a data stream, you must target the backing index + * containing the document. + *

+ * An update action expects that the partial doc, upsert, and + * script and its options are specified on the next line. + *

+ * A delete action does not expect a source on the next line and + * has the same semantics as the standard delete API. + *

+ * NOTE: The final line of data must end with a newline character + * (\n). Each newline character may be preceded by a carriage + * return (\r). When sending NDJSON data to the _bulk + * endpoint, use a Content-Type header of + * application/json or application/x-ndjson. Because + * this format uses literal newline characters (\n) as delimiters, + * make sure that the JSON actions and sources are not pretty printed. + *

+ * If you provide a target in the request path, it is used for any actions that + * don't explicitly specify an _index argument. + *

+ * A note on the format: the idea here is to make processing as fast as + * possible. As some of the actions are redirected to other shards on other + * nodes, only action_meta_data is parsed on the receiving node + * side. + *

+ * Client libraries using this protocol should try and strive to do something + * similar on the client side, and reduce buffering as much as possible. + *

+ * There is no "correct" number of actions to perform in a single bulk + * request. Experiment with different settings to find the optimal size for your + * particular workload. Note that Elasticsearch limits the maximum size of a + * HTTP request to 100mb by default so clients must ensure that no request + * exceeds this size. It is not possible to index a single document that exceeds + * the size limit, so you must pre-process any such documents into smaller + * pieces before sending them to Elasticsearch. For instance, split documents + * into pages or chapters before indexing them, or store raw binary data in a + * system outside Elasticsearch and replace the raw data with a link to the + * external system in the documents that you send to Elasticsearch. + *

+ * Client support for bulk requests + *

+ * Some of the officially supported clients provide helpers to assist with bulk + * requests and reindexing: + *

    + *
  • Go: Check out esutil.BulkIndexer
  • + *
  • Perl: Check out Search::Elasticsearch::Client::5_0::Bulk and + * Search::Elasticsearch::Client::5_0::Scroll
  • + *
  • Python: Check out elasticsearch.helpers.*
  • + *
  • JavaScript: Check out client.helpers.*
  • + *
  • .NET: Check out BulkAllObservable
  • + *
  • PHP: Check out bulk indexing.
  • + *
+ *

+ * Submitting bulk requests with cURL + *

+ * If you're providing text file input to curl, you must use the + * --data-binary flag instead of plain -d. The latter + * doesn't preserve newlines. For example: + * + *

+	 * $ cat requests
+	 * { "index" : { "_index" : "test", "_id" : "1" } }
+	 * { "field1" : "value1" }
+	 * $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
+	 * {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+	 * 
+	 * 
+ *

+ * Optimistic concurrency control + *

+ * Each index and delete action within a bulk API call + * may include the if_seq_no and if_primary_term + * parameters in their respective action and meta data lines. The + * if_seq_no and if_primary_term parameters control + * how operations are run, based on the last modification to existing documents. + * See Optimistic concurrency control for more details. + *

+ * Versioning + *

+ * Each bulk item can include the version value using the version + * field. It automatically follows the behavior of the index or delete operation + * based on the _version mapping. It also supports the + * version_type. + *

+ * Routing + *

+ * Each bulk item can include the routing value using the routing + * field. It automatically follows the behavior of the index or delete operation + * based on the _routing mapping. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Wait for active shards + *

+ * When making bulk calls, you can set the wait_for_active_shards + * parameter to require a minimum number of shard copies to be active before + * starting to process the bulk request. + *

+ * Refresh + *

+ * Control when the changes made by this request are visible to search. + *

+ * NOTE: Only the shards that receive the bulk request will be affected by + * refresh. Imagine a _bulk?refresh=wait_for request with three + * documents in it that happen to be routed to different shards in an index with + * five shards. The request will only wait for those three shards to refresh. + * The other two shards that make up the index do not participate in the + * _bulk request at all. * * @param fn * a function that initializes a builder to create the * {@link BulkRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk">Documentation * on elastic.co */ @@ -393,12 +707,169 @@ public final BulkResponse bulk(Functionindex, + * create, delete, and update actions in + * a single request. This reduces overhead and can greatly increase indexing + * speed. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To use the create action, you must have the + * create_doc, create, index, or + * write index privilege. Data streams support only the + * create action.
  • + *
  • To use the index action, you must have the + * create, index, or write index + * privilege.
  • + *
  • To use the delete action, you must have the + * delete or write index privilege.
  • + *
  • To use the update action, you must have the + * index or write index privilege.
  • + *
  • To automatically create a data stream or index with a bulk API request, + * you must have the auto_configure, create_index, or + * manage index privilege.
  • + *
  • To make the result of a bulk operation visible to search using the + * refresh parameter, you must have the maintenance or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * The actions are specified in the request body using a newline delimited JSON + * (NDJSON) structure: + * + *

+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * ....
+	 * action_and_meta_data\n
+	 * optional_source\n
+	 * 
+	 * 
+ *

+ * The index and create actions expect a source on the + * next line and have the same semantics as the op_type parameter + * in the standard index API. A create action fails if a document + * with the same ID already exists in the target. An index action + * adds or replaces a document as necessary. + *

+ * NOTE: Data streams support only the create action. To update or + * delete a document in a data stream, you must target the backing index + * containing the document. + *

+ * An update action expects that the partial doc, upsert, and + * script and its options are specified on the next line. + *

+ * A delete action does not expect a source on the next line and + * has the same semantics as the standard delete API. + *

+ * NOTE: The final line of data must end with a newline character + * (\n). Each newline character may be preceded by a carriage + * return (\r). When sending NDJSON data to the _bulk + * endpoint, use a Content-Type header of + * application/json or application/x-ndjson. Because + * this format uses literal newline characters (\n) as delimiters, + * make sure that the JSON actions and sources are not pretty printed. + *

+ * If you provide a target in the request path, it is used for any actions that + * don't explicitly specify an _index argument. + *

+ * A note on the format: the idea here is to make processing as fast as + * possible. As some of the actions are redirected to other shards on other + * nodes, only action_meta_data is parsed on the receiving node + * side. + *

+ * Client libraries using this protocol should strive to do something + * similar on the client side, and reduce buffering as much as possible. + *

+ * There is no "correct" number of actions to perform in a single bulk + * request. Experiment with different settings to find the optimal size for your + * particular workload. Note that Elasticsearch limits the maximum size of an + * HTTP request to 100mb by default, so clients must ensure that no request + * exceeds this size. It is not possible to index a single document that exceeds + * the size limit, so you must pre-process any such documents into smaller + * pieces before sending them to Elasticsearch. For instance, split documents + * into pages or chapters before indexing them, or store raw binary data in a + * system outside Elasticsearch and replace the raw data with a link to the + * external system in the documents that you send to Elasticsearch. + *

+ * Client support for bulk requests + *

+ * Some of the officially supported clients provide helpers to assist with bulk + * requests and reindexing: + *

    + *
  • Go: Check out esutil.BulkIndexer
  • + *
  • Perl: Check out Search::Elasticsearch::Client::5_0::Bulk and + * Search::Elasticsearch::Client::5_0::Scroll
  • + *
  • Python: Check out elasticsearch.helpers.*
  • + *
  • JavaScript: Check out client.helpers.*
  • + *
  • .NET: Check out BulkAllObservable
  • + *
  • PHP: Check out bulk indexing.
  • + *
+ *

+ * Submitting bulk requests with cURL + *

+ * If you're providing text file input to curl, you must use the + * --data-binary flag instead of plain -d. The latter + * doesn't preserve newlines. For example: + * + *

+	 * $ cat requests
+	 * { "index" : { "_index" : "test", "_id" : "1" } }
+	 * { "field1" : "value1" }
+	 * $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
+	 * {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+	 * 
+	 * 
+ *

+ * Optimistic concurrency control + *

+ * Each index and delete action within a bulk API call + * may include the if_seq_no and if_primary_term + * parameters in their respective action and meta data lines. The + * if_seq_no and if_primary_term parameters control + * how operations are run, based on the last modification to existing documents. + * See Optimistic concurrency control for more details. + *

+ * Versioning + *

+ * Each bulk item can include the version value using the version + * field. It automatically follows the behavior of the index or delete operation + * based on the _version mapping. It also supports the + * version_type. + *

+ * Routing + *

+ * Each bulk item can include the routing value using the routing + * field. It automatically follows the behavior of the index or delete operation + * based on the _routing mapping. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Wait for active shards + *

+ * When making bulk calls, you can set the wait_for_active_shards + * parameter to require a minimum number of shard copies to be active before + * starting to process the bulk request. + *

+ * Refresh + *

+ * Control when the changes made by this request are visible to search. + *

+ * NOTE: Only the shards that receive the bulk request will be affected by + * refresh. Imagine a _bulk?refresh=wait_for request with three + * documents in it that happen to be routed to different shards in an index with + * five shards. The request will only wait for those three shards to refresh. + * The other two shards that make up the index do not participate in the + * _bulk request at all. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk">Documentation * on elastic.co */ @@ -410,12 +881,11 @@ public BulkResponse bulk() throws IOException, ElasticsearchException { // ----- Endpoint: clear_scroll /** - * Clear a scrolling search. - *

- * Clear the search context and results for a scrolling search. + * Clear a scrolling search. Clear the search context and results for a + * scrolling search. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll">Documentation * on elastic.co */ @@ -427,15 +897,14 @@ public ClearScrollResponse clearScroll(ClearScrollRequest request) throws IOExce } /** - * Clear a scrolling search. - *

- * Clear the search context and results for a scrolling search. + * Clear a scrolling search. Clear the search context and results for a + * scrolling search. * * @param fn * a function that initializes a builder to create the * {@link ClearScrollRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll">Documentation * on elastic.co */ @@ -446,12 +915,11 @@ public final ClearScrollResponse clearScroll( } /** - * Clear a scrolling search. - *

- * Clear the search context and results for a scrolling search. + * Clear a scrolling search. Clear the search context and results for a + * scrolling search. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll">Documentation * on elastic.co */ @@ -463,17 +931,15 @@ public ClearScrollResponse clearScroll() throws IOException, ElasticsearchExcept // ----- Endpoint: close_point_in_time /** - * Close a point in time. - *

- * A point in time must be opened explicitly before being used in search - * requests. The keep_alive parameter tells Elasticsearch how long - * it should persist. A point in time is automatically closed when the - * keep_alive period has elapsed. However, keeping points in time - * has a cost; close them as soon as they are no longer required for search - * requests. + * Close a point in time. A point in time must be opened explicitly before being + * used in search requests. The keep_alive parameter tells + * Elasticsearch how long it should persist. A point in time is automatically + * closed when the keep_alive period has elapsed. However, keeping + * points in time has a cost; close them as soon as they are no longer required + * for search requests. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time">Documentation * on elastic.co */ @@ -486,20 +952,18 @@ public ClosePointInTimeResponse closePointInTime(ClosePointInTimeRequest request } /** - * Close a point in time. - *

- * A point in time must be opened explicitly before being used in search - * requests. The keep_alive parameter tells Elasticsearch how long - * it should persist. A point in time is automatically closed when the - * keep_alive period has elapsed. However, keeping points in time - * has a cost; close them as soon as they are no longer required for search - * requests. + * Close a point in time. A point in time must be opened explicitly before being + * used in search requests. The keep_alive parameter tells + * Elasticsearch how long it should persist. A point in time is automatically + * closed when the keep_alive period has elapsed. However, keeping + * points in time has a cost; close them as soon as they are no longer required + * for search requests. * * @param fn * a function that initializes a builder to create the * {@link ClosePointInTimeRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time">Documentation * on elastic.co */ @@ -513,9 +977,21 @@ public final ClosePointInTimeResponse closePointInTime( /** * Count search results. Get the number of documents matching a query. + *

+ * The query can be provided either by using a simple query string as a + * parameter, or by defining Query DSL within the request body. The query is + * optional. When no query is provided, the API uses match_all to + * count all the documents. + *

+ * The count API supports multi-target syntax. You can run a single count API + * search across multiple data streams and indices. + *

+ * The operation is broadcast across all shards. For each shard ID group, a + * replica is chosen and the search is run against it. This means that replicas + * increase the scalability of the count. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count">Documentation * on elastic.co */ @@ -528,12 +1004,24 @@ public CountResponse count(CountRequest request) throws IOException, Elasticsear /** * Count search results. Get the number of documents matching a query. + *

+ * The query can be provided either by using a simple query string as a + * parameter, or by defining Query DSL within the request body. The query is + * optional. When no query is provided, the API uses match_all to + * count all the documents. + *

+ * The count API supports multi-target syntax. You can run a single count API + * search across multiple data streams and indices. + *

+ * The operation is broadcast across all shards. For each shard ID group, a + * replica is chosen and the search is run against it. This means that replicas + * increase the scalability of the count. * * @param fn * a function that initializes a builder to create the * {@link CountRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count">Documentation * on elastic.co */ @@ -544,9 +1032,21 @@ public final CountResponse count(Function + * The query can be provided either by using a simple query string as a + * parameter, or by defining Query DSL within the request body. The query is + * optional. When no query is provided, the API uses match_all to + * count all the documents. + *

+ * The count API supports multi-target syntax. You can run a single count API + * search across multiple data streams and indices. + *

+ * The operation is broadcast across all shards. For each shard ID group, a + * replica is chosen and the search is run against it. This means that replicas + * increase the scalability of the count. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count">Documentation * on elastic.co */ @@ -558,12 +1058,132 @@ public CountResponse count() throws IOException, ElasticsearchException { // ----- Endpoint: create /** - * Index a document. Adds a JSON document to the specified data stream or index - * and makes it searchable. If the target is an index and the document already - * exists, the request updates the document and increments its version. + * Create a new document in the index. + *

+ * You can index a new JSON document with the /<target>/_doc/ + * or /<target>/_create/<_id> APIs. Using + * _create guarantees that the document is indexed only if it does + * not already exist. It returns a 409 response when a document with the same ID + * already exists in the index. To update an existing document, you must use the + * /<target>/_doc/ API. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To add a document using the + * PUT /<target>/_create/<_id> or + * POST /<target>/_create/<_id> request formats, you + * must have the create_doc, create, + * index, or write index privilege.
  • + *
  • To automatically create a data stream or index with this API request, you + * must have the auto_configure, create_index, or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * Automatically create data streams and indices + *

+ * If the request's target doesn't exist and matches an index template with a + * data_stream definition, the index operation automatically + * creates the data stream. + *

+ * If the target doesn't exist and doesn't match a data stream template, the + * operation automatically creates the index and applies any matching index + * templates. + *

+ * NOTE: Elasticsearch includes several built-in index templates. To avoid + * naming collisions with these templates, refer to index pattern documentation. + *

+ * If no mapping exists, the index operation creates a dynamic mapping. By + * default, new fields and objects are automatically added to the mapping if + * needed. + *

+ * Automatic index creation is controlled by the + * action.auto_create_index setting. If it is true, + * any index can be created automatically. You can modify this setting to + * explicitly allow or block automatic creation of indices that match specified + * patterns or set it to false to turn off automatic index creation + * entirely. Specify a comma-separated list of patterns you want to allow or + * prefix each pattern with + or - to indicate whether + * it should be allowed or blocked. When a list is specified, the default + * behaviour is to disallow. + *

+ * NOTE: The action.auto_create_index setting affects the automatic + * creation of indices only. It does not affect the creation of data streams. + *

+ * Routing + *

+ * By default, shard placement — or routing — is controlled by using a hash of + * the document's ID value. For more explicit control, the value fed into the + * hash function used by the router can be directly specified on a per-operation + * basis using the routing parameter. + *

+ * When setting up explicit mapping, you can also use the _routing + * field to direct the index operation to extract the routing value from the + * document itself. This does come at the (very minimal) cost of an additional + * document parsing pass. If the _routing mapping is defined and + * set to be required, the index operation will fail if no routing value is + * provided or extracted. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Distributed + *

+ * The index operation is directed to the primary shard based on its route and + * performed on the actual node containing this shard. After the primary shard + * completes the operation, if needed, the update is distributed to applicable + * replicas. + *

+ * Active shards + *

+ * To improve the resiliency of writes to the system, indexing operations can be + * configured to wait for a certain number of active shard copies before + * proceeding with the operation. If the requisite number of active shard copies + * are not available, then the write operation must wait and retry, until either + * the requisite shard copies have started or a timeout occurs. By default, + * write operations only wait for the primary shards to be active before + * proceeding (that is to say wait_for_active_shards is + * 1). This default can be overridden in the index settings + * dynamically by setting index.write.wait_for_active_shards. To + * alter this behavior per operation, use the + * wait_for_active_shards request parameter. + *

+ * Valid values are all or any positive integer up to the total number of + * configured copies per shard in the index (which is + * number_of_replicas+1). Specifying a negative value or a number + * greater than the number of shard copies will throw an error. + *

+ * For example, suppose you have a cluster of three nodes, A, B, and C and you + * create an index index with the number of replicas set to 3 (resulting in 4 + * shard copies, one more copy than there are nodes). If you attempt an indexing + * operation, by default the operation will only ensure the primary copy of each + * shard is available before proceeding. This means that even if B and C went + * down and A hosted the primary shard copies, the indexing operation would + * still proceed with only one copy of the data. If + * wait_for_active_shards is set on the request to 3 + * (and all three nodes are up), the indexing operation will require 3 active + * shard copies before proceeding. This requirement should be met because there + * are 3 active nodes in the cluster, each one holding a copy of the shard. + * However, if you set wait_for_active_shards to all + * (or to 4, which is the same in this situation), the indexing + * operation will not proceed as you do not have all 4 copies of each shard + * active in the index. The operation will timeout unless a new node is brought + * up in the cluster to host the fourth copy of the shard. + *

+ * It is important to note that this setting greatly reduces the chances of the + * write operation not writing to the requisite number of shard copies, but it + * does not completely eliminate the possibility, because this check occurs + * before the write operation starts. After the write operation is underway, it + * is still possible for replication to fail on any number of shard copies but + * still succeed on the primary. The _shards section of the API + * response reveals the number of shard copies on which replication succeeded + * and failed. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create">Documentation * on elastic.co */ @@ -576,15 +1196,135 @@ public CreateResponse create(CreateRequest request) } /** - * Index a document. Adds a JSON document to the specified data stream or index - * and makes it searchable. If the target is an index and the document already - * exists, the request updates the document and increments its version. + * Create a new document in the index. + *

+ * You can index a new JSON document with the /<target>/_doc/ + * or /<target>/_create/<_id> APIs. Using + * _create guarantees that the document is indexed only if it does + * not already exist. It returns a 409 response when a document with the same ID + * already exists in the index. To update an existing document, you must use the + * /<target>/_doc/ API. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To add a document using the + * PUT /<target>/_create/<_id> or + * POST /<target>/_create/<_id> request formats, you + * must have the create_doc, create, + * index, or write index privilege.
  • + *
  • To automatically create a data stream or index with this API request, you + * must have the auto_configure, create_index, or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * Automatically create data streams and indices + *

+ * If the request's target doesn't exist and matches an index template with a + * data_stream definition, the index operation automatically + * creates the data stream. + *

+ * If the target doesn't exist and doesn't match a data stream template, the + * operation automatically creates the index and applies any matching index + * templates. + *

+ * NOTE: Elasticsearch includes several built-in index templates. To avoid + * naming collisions with these templates, refer to index pattern documentation. + *

+ * If no mapping exists, the index operation creates a dynamic mapping. By + * default, new fields and objects are automatically added to the mapping if + * needed. + *

+ * Automatic index creation is controlled by the + * action.auto_create_index setting. If it is true, + * any index can be created automatically. You can modify this setting to + * explicitly allow or block automatic creation of indices that match specified + * patterns or set it to false to turn off automatic index creation + * entirely. Specify a comma-separated list of patterns you want to allow or + * prefix each pattern with + or - to indicate whether + * it should be allowed or blocked. When a list is specified, the default + * behaviour is to disallow. + *

+ * NOTE: The action.auto_create_index setting affects the automatic + * creation of indices only. It does not affect the creation of data streams. + *

+ * Routing + *

+ * By default, shard placement — or routing — is controlled by using a hash of + * the document's ID value. For more explicit control, the value fed into the + * hash function used by the router can be directly specified on a per-operation + * basis using the routing parameter. + *

+ * When setting up explicit mapping, you can also use the _routing + * field to direct the index operation to extract the routing value from the + * document itself. This does come at the (very minimal) cost of an additional + * document parsing pass. If the _routing mapping is defined and + * set to be required, the index operation will fail if no routing value is + * provided or extracted. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Distributed + *

+ * The index operation is directed to the primary shard based on its route and + * performed on the actual node containing this shard. After the primary shard + * completes the operation, if needed, the update is distributed to applicable + * replicas. + *

+ * Active shards + *

+ * To improve the resiliency of writes to the system, indexing operations can be + * configured to wait for a certain number of active shard copies before + * proceeding with the operation. If the requisite number of active shard copies + * are not available, then the write operation must wait and retry, until either + * the requisite shard copies have started or a timeout occurs. By default, + * write operations only wait for the primary shards to be active before + * proceeding (that is to say wait_for_active_shards is + * 1). This default can be overridden in the index settings + * dynamically by setting index.write.wait_for_active_shards. To + * alter this behavior per operation, use the + * wait_for_active_shards request parameter. + *

+ * Valid values are all or any positive integer up to the total number of + * configured copies per shard in the index (which is + * number_of_replicas+1). Specifying a negative value or a number + * greater than the number of shard copies will throw an error. + *

+ * For example, suppose you have a cluster of three nodes, A, B, and C and you + * create an index index with the number of replicas set to 3 (resulting in 4 + * shard copies, one more copy than there are nodes). If you attempt an indexing + * operation, by default the operation will only ensure the primary copy of each + * shard is available before proceeding. This means that even if B and C went + * down and A hosted the primary shard copies, the indexing operation would + * still proceed with only one copy of the data. If + * wait_for_active_shards is set on the request to 3 + * (and all three nodes are up), the indexing operation will require 3 active + * shard copies before proceeding. This requirement should be met because there + * are 3 active nodes in the cluster, each one holding a copy of the shard. + * However, if you set wait_for_active_shards to all + * (or to 4, which is the same in this situation), the indexing + * operation will not proceed as you do not have all 4 copies of each shard + * active in the index. The operation will timeout unless a new node is brought + * up in the cluster to host the fourth copy of the shard. + *

+ * It is important to note that this setting greatly reduces the chances of the + * write operation not writing to the requisite number of shard copies, but it + * does not completely eliminate the possibility, because this check occurs + * before the write operation starts. After the write operation is underway, it + * is still possible for replication to fail on any number of shard copies but + * still succeed on the primary. The _shards section of the API + * response reveals the number of shard copies on which replication succeeded + * and failed. * * @param fn * a function that initializes a builder to create the * {@link CreateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create">Documentation * on elastic.co */ @@ -597,10 +1337,60 @@ public final CreateResponse create( // ----- Endpoint: delete /** - * Delete a document. Removes a JSON document from the specified index. + * Delete a document. + *

+ * Remove a JSON document from the specified index. + *

+ * NOTE: You cannot send deletion requests directly to a data stream. To delete + * a document in a data stream, you must target the backing index containing the + * document. + *

+ * Optimistic concurrency control + *

+ * Delete operations can be made conditional and only be performed if the last + * modification to the document was assigned the sequence number and primary + * term specified by the if_seq_no and if_primary_term + * parameters. If a mismatch is detected, the operation will result in a + * VersionConflictException and a status code of 409. + *

+ * Versioning + *

+ * Each document indexed is versioned. When deleting a document, the version can + * be specified to make sure the relevant document you are trying to delete is + * actually being deleted and it has not changed in the meantime. Every write + * operation run on a document, deletes included, causes its version to be + * incremented. The version number of a deleted document remains available for a + * short time after deletion to allow for control of concurrent operations. The + * length of time for which a deleted document's version remains available is + * determined by the index.gc_deletes index setting. + *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to delete a document. + *

+ * If the _routing mapping is set to required and no + * routing value is specified, the delete API throws a + * RoutingMissingException and rejects the request. + *

+ * For example: + * + *

+	 * DELETE /my-index-000001/_doc/1?routing=shard-1
+	 * 
+	 * 
+ *

+ * This request deletes the document with ID 1, but it is routed based on the + * user. The document is not deleted if the correct routing is not specified. + *

+ * Distributed + *

+ * The delete operation gets hashed into a specific shard ID. It then gets + * redirected into the primary shard within that ID group and replicated (if + * needed) to shard replicas within that ID group. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete">Documentation * on elastic.co */ @@ -612,13 +1402,63 @@ public DeleteResponse delete(DeleteRequest request) throws IOException, Elastics } /** - * Delete a document. Removes a JSON document from the specified index. + * Delete a document. + *

+ * Remove a JSON document from the specified index. + *

+ * NOTE: You cannot send deletion requests directly to a data stream. To delete + * a document in a data stream, you must target the backing index containing the + * document. + *

+ * Optimistic concurrency control + *

+ * Delete operations can be made conditional and only be performed if the last + * modification to the document was assigned the sequence number and primary + * term specified by the if_seq_no and if_primary_term + * parameters. If a mismatch is detected, the operation will result in a + * VersionConflictException and a status code of 409. + *

+ * Versioning + *

+ * Each document indexed is versioned. When deleting a document, the version can + * be specified to make sure the relevant document you are trying to delete is + * actually being deleted and it has not changed in the meantime. Every write + * operation run on a document, deletes included, causes its version to be + * incremented. The version number of a deleted document remains available for a + * short time after deletion to allow for control of concurrent operations. The + * length of time for which a deleted document's version remains available is + * determined by the index.gc_deletes index setting. + *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to delete a document. + *

+ * If the _routing mapping is set to required and no + * routing value is specified, the delete API throws a + * RoutingMissingException and rejects the request. + *

+ * For example: + * + *

+	 * DELETE /my-index-000001/_doc/1?routing=shard-1
+	 * 
+	 * 
+ *

+ * This request deletes the document with ID 1, but it is routed based on the + * user. The document is not deleted if the correct routing is not specified. + *

+ * Distributed + *

+ * The delete operation gets hashed into a specific shard ID. It then gets + * redirected into the primary shard within that ID group and replicated (if + * needed) to shard replicas within that ID group. * * @param fn * a function that initializes a builder to create the * {@link DeleteRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete">Documentation * on elastic.co */ @@ -630,10 +1470,139 @@ public final DeleteResponse delete(Function + * Deletes documents that match the specified query. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or alias: + *

    + *
  • read
  • + *
  • delete or write
  • + *
+ *

+ * You can specify the query criteria in the request URI or the request body + * using the same syntax as the search API. When you submit a delete by query + * request, Elasticsearch gets a snapshot of the data stream or index when it + * begins processing the request and deletes matching documents using internal + * versioning. If a document changes between the time that the snapshot is taken + * and the delete operation is processed, it results in a version conflict and + * the delete operation fails. + *

+ * NOTE: Documents with a version equal to 0 cannot be deleted using delete by + * query because internal versioning does not support 0 as a valid version + * number. + *

+ * While processing a delete by query request, Elasticsearch performs multiple + * search requests sequentially to find all of the matching documents to delete. + * A bulk delete request is performed for each batch of matching documents. If a + * search or bulk request is rejected, the requests are retried up to 10 times, + * with exponential backoff. If the maximum retry limit is reached, processing + * halts and all failed requests are returned in the response. Any delete + * requests that completed successfully still stick; they are not rolled back. + *

+ * You can opt to count version conflicts instead of halting and returning by + * setting conflicts to proceed. Note that if you opt + * to count version conflicts the operation could attempt to delete more + * documents from the source than max_docs until it has + * successfully deleted max_docs documents, or it has gone through + * every document in the source query. + *

+ * Throttling delete requests + *

+ * To control the rate at which delete by query issues batches of delete + * operations, you can set requests_per_second to any positive + * decimal number. This pads each batch with a wait time to throttle the rate. + * Set requests_per_second to -1 to disable + * throttling. + *

+ * Throttling uses a wait time between batches so that the internal scroll + * requests can be given a timeout that takes the request padding into account. + * The padding time is the difference between the batch size divided by the + * requests_per_second and the time spent writing. By default the + * batch size is 1000, so if requests_per_second is + * set to 500: + * + *

+	 * target_time = 1000 / 500 per second = 2 seconds
+	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+	 * 
+	 * 
+ *

+ * Since the batch is issued as a single _bulk request, large batch + * sizes cause Elasticsearch to create many requests and wait before starting + * the next set. This is "bursty" instead of "smooth". + *

+ * Slicing + *

+ * Delete by query supports sliced scroll to parallelize the delete process. + * This can improve efficiency and provide a convenient way to break the request + * down into smaller parts. + *

+ * Setting slices to auto lets Elasticsearch choose + * the number of slices to use. This setting will use one slice per shard, up to + * a certain limit. If there are multiple source data streams or indices, it + * will choose the number of slices based on the index or backing index with the + * smallest number of shards. Adding slices to the delete by query operation + * creates sub-requests which means it has some quirks: + *

    + *
  • You can see these requests in the tasks APIs. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices only contains + * the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each + * sub-request.
  • + *
  • Due to the nature of slices each sub-request won't get a + * perfectly even portion of the documents. All documents will be addressed, but + * some slices may be larger than others. Expect larger slices to have a more + * even distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed + * proportionally to each sub-request. Combine that with the earlier point about + * distribution being uneven and you should conclude that using + * max_docs with slices might not result in exactly + * max_docs documents being deleted.
  • + *
  • Each sub-request gets a slightly different snapshot of the source data + * stream or index though these are all taken at approximately the same + * time.
  • + *
+ *

+ * If you're slicing manually or otherwise tuning automatic slicing, keep in + * mind that: + *

    + *
  • Query performance is most efficient when the number of slices is equal to + * the number of shards in the index or backing index. If that number is large + * (for example, 500), choose a lower number as too many slices + * hurts performance. Setting slices higher than the number of + * shards generally does not improve efficiency and adds overhead.
  • + *
  • Delete performance scales linearly across available resources with the + * number of slices.
  • + *
+ *

+ * Whether query or delete performance dominates the runtime depends on the + * documents being reindexed and cluster resources. + *

+ * Cancel a delete by query operation + *

+ * Any delete by query can be canceled using the task cancel API. For example: + * + *

+	 * POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
+	 * 
+	 * 
+ *

+ * The task ID can be found by using the get tasks API. + *

+ * Cancellation should happen quickly but might take a few seconds. The get task + * status API will continue to list the delete by query task until this task + * checks that it has been cancelled and terminates itself. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query">Documentation * on elastic.co */ @@ -646,13 +1615,142 @@ public DeleteByQueryResponse deleteByQuery(DeleteByQueryRequest request) } /** - * Delete documents. Deletes documents that match the specified query. + * Delete documents. + *

+ * Deletes documents that match the specified query. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or alias: + *

    + *
  • read
  • + *
  • delete or write
  • + *
+ *

+ * You can specify the query criteria in the request URI or the request body + * using the same syntax as the search API. When you submit a delete by query + * request, Elasticsearch gets a snapshot of the data stream or index when it + * begins processing the request and deletes matching documents using internal + * versioning. If a document changes between the time that the snapshot is taken + * and the delete operation is processed, it results in a version conflict and + * the delete operation fails. + *

+ * NOTE: Documents with a version equal to 0 cannot be deleted using delete by + * query because internal versioning does not support 0 as a valid version + * number. + *

+ * While processing a delete by query request, Elasticsearch performs multiple + * search requests sequentially to find all of the matching documents to delete. + * A bulk delete request is performed for each batch of matching documents. If a + * search or bulk request is rejected, the requests are retried up to 10 times, + * with exponential backoff. If the maximum retry limit is reached, processing + * halts and all failed requests are returned in the response. Any delete + * requests that completed successfully still stick; they are not rolled back. + *

+ * You can opt to count version conflicts instead of halting and returning by + * setting conflicts to proceed. Note that if you opt + * to count version conflicts the operation could attempt to delete more + * documents from the source than max_docs until it has + * successfully deleted max_docs documents, or it has gone through + * every document in the source query. + *

+ * Throttling delete requests + *

+ * To control the rate at which delete by query issues batches of delete + * operations, you can set requests_per_second to any positive + * decimal number. This pads each batch with a wait time to throttle the rate. + * Set requests_per_second to -1 to disable + * throttling. + *

+ * Throttling uses a wait time between batches so that the internal scroll + * requests can be given a timeout that takes the request padding into account. + * The padding time is the difference between the batch size divided by the + * requests_per_second and the time spent writing. By default the + * batch size is 1000, so if requests_per_second is + * set to 500: + * + *

+	 * target_time = 1000 / 500 per second = 2 seconds
+	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+	 * 
+	 * 
+ *

+ * Since the batch is issued as a single _bulk request, large batch + * sizes cause Elasticsearch to create many requests and wait before starting + * the next set. This is "bursty" instead of "smooth". + *

+ * Slicing + *

+ * Delete by query supports sliced scroll to parallelize the delete process. + * This can improve efficiency and provide a convenient way to break the request + * down into smaller parts. + *

+ * Setting slices to auto lets Elasticsearch choose + * the number of slices to use. This setting will use one slice per shard, up to + * a certain limit. If there are multiple source data streams or indices, it + * will choose the number of slices based on the index or backing index with the + * smallest number of shards. Adding slices to the delete by query operation + * creates sub-requests which means it has some quirks: + *

    + *
  • You can see these requests in the tasks APIs. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices only contains + * the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each + * sub-request.
  • + *
  • Due to the nature of slices each sub-request won't get a + * perfectly even portion of the documents. All documents will be addressed, but + * some slices may be larger than others. Expect larger slices to have a more + * even distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed + * proportionally to each sub-request. Combine that with the earlier point about + * distribution being uneven and you should conclude that using + * max_docs with slices might not result in exactly + * max_docs documents being deleted.
  • + *
  • Each sub-request gets a slightly different snapshot of the source data + * stream or index though these are all taken at approximately the same + * time.
  • + *
+ *

+ * If you're slicing manually or otherwise tuning automatic slicing, keep in + * mind that: + *

    + *
  • Query performance is most efficient when the number of slices is equal to + * the number of shards in the index or backing index. If that number is large + * (for example, 500), choose a lower number as too many slices + * hurts performance. Setting slices higher than the number of + * shards generally does not improve efficiency and adds overhead.
  • + *
  • Delete performance scales linearly across available resources with the + * number of slices.
  • + *
+ *

+ * Whether query or delete performance dominates the runtime depends on the + * documents being reindexed and cluster resources. + *

+ * Cancel a delete by query operation + *

+ * Any delete by query can be canceled using the task cancel API. For example: + * + *

+	 * POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
+	 * 
+	 * 
+ *

+ * The task ID can be found by using the get tasks API. + *

+ * Cancellation should happen quickly but might take a few seconds. The get task + * status API will continue to list the delete by query task until this task + * checks that it has been cancelled and terminates itself. * * @param fn * a function that initializes a builder to create the * {@link DeleteByQueryRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query">Documentation * on elastic.co */ @@ -673,7 +1771,7 @@ public final DeleteByQueryResponse deleteByQuery( * current batch to prevent scroll timeouts. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle">Documentation * on elastic.co */ @@ -697,7 +1795,7 @@ public DeleteByQueryRethrottleResponse deleteByQueryRethrottle(DeleteByQueryReth * a function that initializes a builder to create the * {@link DeleteByQueryRethrottleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle">Documentation * on elastic.co */ @@ -714,7 +1812,7 @@ public final DeleteByQueryRethrottleResponse deleteByQueryRethrottle( * template. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script">Documentation * on elastic.co */ @@ -733,7 +1831,7 @@ public DeleteScriptResponse deleteScript(DeleteScriptRequest request) throws IOE * a function that initializes a builder to create the * {@link DeleteScriptRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script">Documentation * on elastic.co */ @@ -746,10 +1844,32 @@ public final DeleteScriptResponse deleteScript( // ----- Endpoint: exists /** - * Check a document. Checks if a specified document exists. + * Check a document. + *

+ * Verify that a document exists. For example, check to see if a document with + * the _id 0 exists: + * + *

+	 * HEAD my-index-000001/_doc/0
+	 * 
+	 * 
+ *

+ * If the document exists, the API returns a status code of + * 200 - OK. If the document doesn’t exist, the API returns + * 404 - Not Found. + *

+ * Versioning support + *

+ * You can use the version parameter to check the document only if + * its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -761,13 +1881,35 @@ public BooleanResponse exists(ExistsRequest request) throws IOException, Elastic } /** - * Check a document. Checks if a specified document exists. + * Check a document. + *

+ * Verify that a document exists. For example, check to see if a document with + * the _id 0 exists: + * + *

+	 * HEAD my-index-000001/_doc/0
+	 * 
+	 * 
+ *

+ * If the document exists, the API returns a status code of + * 200 - OK. If the document doesn’t exist, the API returns + * 404 - Not Found. + *

+ * Versioning support + *

+ * You can use the version parameter to check the document only if + * its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @param fn * a function that initializes a builder to create the * {@link ExistsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -779,11 +1921,19 @@ public final BooleanResponse exists(Function_source is - * stored. + * Check for a document source. + *

+ * Check whether a document source exists in an index. For example: + * + *

+	 * HEAD my-index-000001/_source/1
+	 * 
+	 * 
+ *

+ * A document's source is not available if it is disabled in the mapping. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -795,14 +1945,22 @@ public BooleanResponse existsSource(ExistsSourceRequest request) throws IOExcept } /** - * Check for a document source. Checks if a document's _source is - * stored. + * Check for a document source. + *

+ * Check whether a document source exists in an index. For example: + * + *

+	 * HEAD my-index-000001/_source/1
+	 * 
+	 * 
+ *

+ * A document's source is not available if it is disabled in the mapping. * * @param fn * a function that initializes a builder to create the * {@link ExistsSourceRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -815,11 +1973,12 @@ public final BooleanResponse existsSource( // ----- Endpoint: explain /** - * Explain a document match result. Returns information about why a specific - * document matches, or doesn’t match, a query. + * Explain a document match result. Get information about why a specific + * document matches, or doesn't match, a query. It computes a score explanation + * for a query and a specific document. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain">Documentation * on elastic.co */ @@ -834,14 +1993,15 @@ public ExplainResponse explain(ExplainRequest request, Cl } /** - * Explain a document match result. Returns information about why a specific - * document matches, or doesn’t match, a query. + * Explain a document match result. Get information about why a specific + * document matches, or doesn't match, a query. It computes a score explanation + * for a query and a specific document. * * @param fn * a function that initializes a builder to create the * {@link ExplainRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain">Documentation * on elastic.co */ @@ -852,11 +2012,12 @@ public final ExplainResponse explain( } /** - * Explain a document match result. Returns information about why a specific - * document matches, or doesn’t match, a query. + * Explain a document match result. Get information about why a specific + * document matches, or doesn't match, a query. It computes a score explanation + * for a query and a specific document. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain">Documentation * on elastic.co */ @@ -871,14 +2032,15 @@ public ExplainResponse explain(ExplainRequest request, Ty } /** - * Explain a document match result. Returns information about why a specific - * document matches, or doesn’t match, a query. + * Explain a document match result. Get information about why a specific + * document matches, or doesn't match, a query. It computes a score explanation + * for a query and a specific document. * * @param fn * a function that initializes a builder to create the * {@link ExplainRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain">Documentation * on elastic.co */ @@ -901,7 +2063,7 @@ public final ExplainResponse explain( * field that belongs to the keyword family. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps">Documentation * on elastic.co */ @@ -926,7 +2088,7 @@ public FieldCapsResponse fieldCaps(FieldCapsRequest request) throws IOException, * a function that initializes a builder to create the * {@link FieldCapsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps">Documentation * on elastic.co */ @@ -946,7 +2108,7 @@ public final FieldCapsResponse fieldCaps(Functionkeyword family. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps">Documentation * on elastic.co */ @@ -958,11 +2120,81 @@ public FieldCapsResponse fieldCaps() throws IOException, ElasticsearchException // ----- Endpoint: get /** - * Get a document by its ID. Retrieves the document with the specified ID from - * an index. + * Get a document by its ID. + *

+ * Get a document and its source or stored fields from an index. + *

+ * By default, this API is realtime and is not affected by the refresh rate of + * the index (when data will become visible for search). In the case where + * stored fields are requested with the stored_fields parameter and + * the document has been updated but is not yet refreshed, the API will have to + * parse and analyze the source to extract the stored fields. To turn off + * realtime behavior, set the realtime parameter to false. + *

+ * Source filtering + *

+ * By default, the API returns the contents of the _source field + * unless you have used the stored_fields parameter or the + * _source field is turned off. You can turn off + * _source retrieval by using the _source parameter: + * + *

+	 * GET my-index-000001/_doc/0?_source=false
+	 * 
+	 * 
+ *

+ * If you only need one or two fields from the _source, use the + * _source_includes or _source_excludes parameters to + * include or filter out particular fields. This can be helpful with large + * documents where partial retrieval can save on network overhead. Both + * parameters take a comma-separated list of fields or wildcard expressions. For + * example: + * + *

+	 * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ *

+ * If you only want to specify includes, you can use a shorter notation: + * + *

+	 * GET my-index-000001/_doc/0?_source=*.id
+	 * 
+	 * 
+ *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to retrieve a document. For example: + * + *

+	 * GET my-index-000001/_doc/2?routing=user1
+	 * 
+	 * 
+ *

+ * This request gets the document with ID 2, but it is routed based on the user. + * The document is not fetched if the correct routing is not specified. + *

+ * Distributed + *

+ * The GET operation is hashed into a specific shard ID. It is then redirected + * to one of the replicas within that shard ID and returns the result. The + * replicas are the primary shard and its replicas within that shard ID group. + * This means that the more replicas you have, the better your GET scaling will + * be. + *

+ * Versioning support + *

+ * You can use the version parameter to retrieve the document only + * if its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -977,14 +2209,84 @@ public GetResponse get(GetRequest request, Class + * Get a document and its source or stored fields from an index. + *

+ * By default, this API is realtime and is not affected by the refresh rate of + * the index (when data will become visible for search). In the case where + * stored fields are requested with the stored_fields parameter and + * the document has been updated but is not yet refreshed, the API will have to + * parse and analyze the source to extract the stored fields. To turn off + * realtime behavior, set the realtime parameter to false. + *

+ * Source filtering + *

+ * By default, the API returns the contents of the _source field + * unless you have used the stored_fields parameter or the + * _source field is turned off. You can turn off + * _source retrieval by using the _source parameter: + * + *

+	 * GET my-index-000001/_doc/0?_source=false
+	 * 
+	 * 
+ *

+ * If you only need one or two fields from the _source, use the + * _source_includes or _source_excludes parameters to + * include or filter out particular fields. This can be helpful with large + * documents where partial retrieval can save on network overhead. Both + * parameters take a comma-separated list of fields or wildcard expressions. For + * example: + * + *

+	 * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ *

+ * If you only want to specify includes, you can use a shorter notation: + * + *

+	 * GET my-index-000001/_doc/0?_source=*.id
+	 * 
+	 * 
+ *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to retrieve a document. For example: + * + *

+	 * GET my-index-000001/_doc/2?routing=user1
+	 * 
+	 * 
+ *

+ * This request gets the document with ID 2, but it is routed based on the user. + * The document is not fetched if the correct routing is not specified. + *

+ * Distributed + *

+ * The GET operation is hashed into a specific shard ID. It is then redirected + * to one of the replicas within that shard ID and returns the result. The + * replicas are the primary shard and its replicas within that shard ID group. + * This means that the more replicas you have, the better your GET scaling will + * be. + *

+ * Versioning support + *

+ * You can use the version parameter to retrieve the document only + * if its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @param fn * a function that initializes a builder to create the * {@link GetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -994,11 +2296,81 @@ public final GetResponse get(Function + * Get a document and its source or stored fields from an index. + *

+ * By default, this API is realtime and is not affected by the refresh rate of + * the index (when data will become visible for search). In the case where + * stored fields are requested with the stored_fields parameter and + * the document has been updated but is not yet refreshed, the API will have to + * parse and analyze the source to extract the stored fields. To turn off + * realtime behavior, set the realtime parameter to false. + *

+ * Source filtering + *

+ * By default, the API returns the contents of the _source field + * unless you have used the stored_fields parameter or the + * _source field is turned off. You can turn off + * _source retrieval by using the _source parameter: + * + *

+	 * GET my-index-000001/_doc/0?_source=false
+	 * 
+	 * 
+ *

+ * If you only need one or two fields from the _source, use the + * _source_includes or _source_excludes parameters to + * include or filter out particular fields. This can be helpful with large + * documents where partial retrieval can save on network overhead. Both + * parameters take a comma-separated list of fields or wildcard expressions. For + * example: + * + *

+	 * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ *

+ * If you only want to specify includes, you can use a shorter notation: + * + *

+	 * GET my-index-000001/_doc/0?_source=*.id
+	 * 
+	 * 
+ *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to retrieve a document. For example: + * + *

+	 * GET my-index-000001/_doc/2?routing=user1
+	 * 
+	 * 
+ *

+ * This request gets the document with ID 2, but it is routed based on the user. + * The document is not fetched if the correct routing is not specified. + *

+ * Distributed + *

+ * The GET operation is hashed into a specific shard ID. It is then redirected + * to one of the replicas within that shard ID and returns the result. The + * replicas are the primary shard and its replicas within that shard ID group. + * This means that the more replicas you have, the better your GET scaling will + * be. + *

+ * Versioning support + *

+ * You can use the version parameter to retrieve the document only + * if its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -1013,14 +2385,84 @@ public GetResponse get(GetRequest request, Type tDocument } /** - * Get a document by its ID. Retrieves the document with the specified ID from - * an index. + * Get a document by its ID. + *

+ * Get a document and its source or stored fields from an index. + *

+ * By default, this API is realtime and is not affected by the refresh rate of + * the index (when data will become visible for search). In the case where + * stored fields are requested with the stored_fields parameter and + * the document has been updated but is not yet refreshed, the API will have to + * parse and analyze the source to extract the stored fields. To turn off + * realtime behavior, set the realtime parameter to false. + *

+ * Source filtering + *

+ * By default, the API returns the contents of the _source field + * unless you have used the stored_fields parameter or the + * _source field is turned off. You can turn off + * _source retrieval by using the _source parameter: + * + *

+	 * GET my-index-000001/_doc/0?_source=false
+	 * 
+	 * 
+ *

+ * If you only need one or two fields from the _source, use the + * _source_includes or _source_excludes parameters to + * include or filter out particular fields. This can be helpful with large + * documents where partial retrieval can save on network overhead. Both + * parameters take a comma separated list of fields or wildcard expressions. For + * example: + * + *

+	 * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ *

+ * If you only want to specify includes, you can use a shorter notation: + * + *

+	 * GET my-index-000001/_doc/0?_source=*.id
+	 * 
+	 * 
+ *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to retrieve a document. For example: + * + *

+	 * GET my-index-000001/_doc/2?routing=user1
+	 * 
+	 * 
+ *

+ * This request gets the document with ID 2, but it is routed based on the user. + * The document is not fetched if the correct routing is not specified. + *

+ * Distributed + *

+ * The GET operation is hashed into a specific shard ID. It is then redirected + * to one of the replicas within that shard ID and returns the result. The + * replicas are the primary shard and its replicas within that shard ID group. + * This means that the more replicas you have, the better your GET scaling will + * be. + *

+ * Versioning support + *

+ * You can use the version parameter to retrieve the document only + * if its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @param fn * a function that initializes a builder to create the * {@link GetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -1036,7 +2478,7 @@ public final GetResponse get(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script">Documentation * on elastic.co */ @@ -1055,7 +2497,7 @@ public GetScriptResponse getScript(GetScriptRequest request) throws IOException, * a function that initializes a builder to create the * {@link GetScriptRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script">Documentation * on elastic.co */ @@ -1072,7 +2514,7 @@ public final GetScriptResponse getScript(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context">Documentation * on elastic.co */ public GetScriptContextResponse getScriptContext() throws IOException, ElasticsearchException { @@ -1088,7 +2530,7 @@ public GetScriptContextResponse getScriptContext() throws IOException, Elasticse * Get a list of available script types, languages, and contexts. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages">Documentation * on elastic.co */ public GetScriptLanguagesResponse getScriptLanguages() throws IOException, ElasticsearchException { @@ -1099,10 +2541,25 @@ public GetScriptLanguagesResponse getScriptLanguages() throws IOException, Elast // ----- Endpoint: get_source /** - * Get a document's source. Returns the source of a document. 
+ * Get a document's source. + *

+ * Get the source of a document. For example: * + *

+	 * GET my-index-000001/_source/1
+	 * 
+	 * 
+ *

+ * You can use the source filtering parameters to control which parts of the + * _source are returned: + * + *

+	 * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -1118,13 +2575,28 @@ public GetSourceResponse getSource(GetSourceRequest reque } /** - * Get a document's source. Returns the source of a document. + * Get a document's source. + *

+ * Get the source of a document. For example: + * + *

+	 * GET my-index-000001/_source/1
+	 * 
+	 * 
+ *

+ * You can use the source filtering parameters to control which parts of the + * _source are returned: * + *

+	 * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link GetSourceRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -1135,10 +2607,25 @@ public final GetSourceResponse getSource( } /** - * Get a document's source. Returns the source of a document. + * Get a document's source. + *

+ * Get the source of a document. For example: + * + *

+	 * GET my-index-000001/_source/1
+	 * 
+	 * 
+ *

+ * You can use the source filtering parameters to control which parts of the + * _source are returned: * + *

+	 * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -1154,13 +2641,28 @@ public GetSourceResponse getSource(GetSourceRequest reque } /** - * Get a document's source. Returns the source of a document. + * Get a document's source. + *

+ * Get the source of a document. For example: + * + *

+	 * GET my-index-000001/_source/1
+	 * 
+	 * 
+ *

+ * You can use the source filtering parameters to control which parts of the + * _source are returned: * + *

+	 * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link GetSourceRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get">Documentation * on elastic.co */ @@ -1203,7 +2705,7 @@ public final GetSourceResponse getSource( * false to disable the more expensive analysis logic. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report">Documentation * on elastic.co */ @@ -1248,7 +2750,7 @@ public HealthReportResponse healthReport(HealthReportRequest request) throws IOE * a function that initializes a builder to create the * {@link HealthReportRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report">Documentation * on elastic.co */ @@ -1289,7 +2791,7 @@ public final HealthReportResponse healthReport( * false to disable the more expensive analysis logic. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report">Documentation * on elastic.co */ @@ -1301,12 +2803,198 @@ public HealthReportResponse healthReport() throws IOException, ElasticsearchExce // ----- Endpoint: index /** - * Index a document. Adds a JSON document to the specified data stream or index - * and makes it searchable. If the target is an index and the document already - * exists, the request updates the document and increments its version. + * Create or update a document in an index. + *

+ * Add a JSON document to the specified data stream or index and make it + * searchable. If the target is an index and the document already exists, the + * request updates the document and increments its version. + *

+ * NOTE: You cannot use this API to send update requests for existing documents + * in a data stream. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To add or overwrite a document using the + * PUT /<target>/_doc/<_id> request format, you must + * have the create, index, or write index + * privilege.
  • + *
  • To add a document using the POST /<target>/_doc/ + * request format, you must have the create_doc, + * create, index, or write index + * privilege.
  • + *
  • To automatically create a data stream or index with this API request, you + * must have the auto_configure, create_index, or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * NOTE: Replica shards might not all be started when an indexing operation + * returns successfully. By default, only the primary is required. Set + * wait_for_active_shards to change this default behavior. + *

+ * Automatically create data streams and indices + *

+ * If the request's target doesn't exist and matches an index template with a + * data_stream definition, the index operation automatically + * creates the data stream. + *

+ * If the target doesn't exist and doesn't match a data stream template, the + * operation automatically creates the index and applies any matching index + * templates. + *

+ * NOTE: Elasticsearch includes several built-in index templates. To avoid + * naming collisions with these templates, refer to index pattern documentation. + *

+ * If no mapping exists, the index operation creates a dynamic mapping. By + * default, new fields and objects are automatically added to the mapping if + * needed. + *

+ * Automatic index creation is controlled by the + * action.auto_create_index setting. If it is true, + * any index can be created automatically. You can modify this setting to + * explicitly allow or block automatic creation of indices that match specified + * patterns or set it to false to turn off automatic index creation + * entirely. Specify a comma-separated list of patterns you want to allow or + * prefix each pattern with + or - to indicate whether + * it should be allowed or blocked. When a list is specified, the default + * behaviour is to disallow. + *

+ * NOTE: The action.auto_create_index setting affects the automatic + * creation of indices only. It does not affect the creation of data streams. + *

+ * Optimistic concurrency control + *

+ * Index operations can be made conditional and only be performed if the last + * modification to the document was assigned the sequence number and primary + * term specified by the if_seq_no and if_primary_term + * parameters. If a mismatch is detected, the operation will result in a + * VersionConflictException and a status code of 409. + *

+ * Routing + *

+ * By default, shard placement — or routing — is controlled by using a hash of + * the document's ID value. For more explicit control, the value fed into the + * hash function used by the router can be directly specified on a per-operation + * basis using the routing parameter. + *

+ * When setting up explicit mapping, you can also use the _routing + * field to direct the index operation to extract the routing value from the + * document itself. This does come at the (very minimal) cost of an additional + * document parsing pass. If the _routing mapping is defined and + * set to be required, the index operation will fail if no routing value is + * provided or extracted. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Distributed + *

+ * The index operation is directed to the primary shard based on its route and + * performed on the actual node containing this shard. After the primary shard + * completes the operation, if needed, the update is distributed to applicable + * replicas. + *

+ * Active shards + *

+ * To improve the resiliency of writes to the system, indexing operations can be + * configured to wait for a certain number of active shard copies before + * proceeding with the operation. If the requisite number of active shard copies + * are not available, then the write operation must wait and retry, until either + * the requisite shard copies have started or a timeout occurs. By default, + * write operations only wait for the primary shards to be active before + * proceeding (that is to say wait_for_active_shards is + * 1). This default can be overridden in the index settings + * dynamically by setting index.write.wait_for_active_shards. To + * alter this behavior per operation, use the + * wait_for_active_shards request parameter. + *

+ * Valid values are all or any positive integer up to the total number of + * configured copies per shard in the index (which is + * number_of_replicas+1). Specifying a negative value or a number + * greater than the number of shard copies will throw an error. + *

+ * For example, suppose you have a cluster of three nodes, A, B, and C and you + * create an index index with the number of replicas set to 3 (resulting in 4 + * shard copies, one more copy than there are nodes). If you attempt an indexing + * operation, by default the operation will only ensure the primary copy of each + * shard is available before proceeding. This means that even if B and C went + * down and A hosted the primary shard copies, the indexing operation would + * still proceed with only one copy of the data. If + * wait_for_active_shards is set on the request to 3 + * (and all three nodes are up), the indexing operation will require 3 active + * shard copies before proceeding. This requirement should be met because there + * are 3 active nodes in the cluster, each one holding a copy of the shard. + * However, if you set wait_for_active_shards to all + * (or to 4, which is the same in this situation), the indexing + * operation will not proceed as you do not have all 4 copies of each shard + * active in the index. The operation will timeout unless a new node is brought + * up in the cluster to host the fourth copy of the shard. + *

+ * It is important to note that this setting greatly reduces the chances of the + * write operation not writing to the requisite number of shard copies, but it + * does not completely eliminate the possibility, because this check occurs + * before the write operation starts. After the write operation is underway, it + * is still possible for replication to fail on any number of shard copies but + * still succeed on the primary. The _shards section of the API + * response reveals the number of shard copies on which replication succeeded + * and failed. + *

+ * No operation (noop) updates + *

+ * When updating a document by using this API, a new version of the document is + * always created even if the document hasn't changed. If this isn't acceptable, + * use the _update API with detect_noop set to + * true. The detect_noop option isn't available on + * this API because it doesn’t fetch the old source and isn't able to compare it + * against the new source. + *

+ * There isn't a definitive rule for when noop updates aren't acceptable. It's a + * combination of lots of factors like how frequently your data source sends + * updates that are actually noops and how many queries per second Elasticsearch + * runs on the shard receiving the updates. + *

+ * Versioning + *

+ * Each indexed document is given a version number. By default, internal + * versioning is used that starts at 1 and increments with each update, deletes + * included. Optionally, the version number can be set to an external value (for + * example, if maintained in a database). To enable this functionality, + * version_type should be set to external. The value + * provided must be a numeric, long value greater than or equal to 0, and less + * than around 9.2e+18. + *

+ * NOTE: Versioning is completely real time, and is not affected by the near + * real time aspects of search operations. If no version is provided, the + * operation runs without any version checks. + *

+ * When using the external version type, the system checks to see if the version + * number passed to the index request is greater than the version of the + * currently stored document. If true, the document will be indexed and the new + * version number used. If the value provided is less than or equal to the + * stored document's version number, a version conflict will occur and the index + * operation will fail. For example: * + *

+	 * PUT my-index-000001/_doc/1?version=2&version_type=external
+	 * {
+	 *   "user": {
+	 *     "id": "elkbee"
+	 *   }
+	 * }
+	 *
+	 * In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
+	 * If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
+	 *
+	 * A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
+	 * Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
+	 * 
+	 * 
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create">Documentation * on elastic.co */ @@ -1318,15 +3006,201 @@ public IndexResponse index(IndexRequest request) throws I } /** - * Index a document. Adds a JSON document to the specified data stream or index - * and makes it searchable. If the target is an index and the document already - * exists, the request updates the document and increments its version. + * Create or update a document in an index. + *

+ * Add a JSON document to the specified data stream or index and make it + * searchable. If the target is an index and the document already exists, the + * request updates the document and increments its version. + *

+ * NOTE: You cannot use this API to send update requests for existing documents + * in a data stream. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To add or overwrite a document using the + * PUT /<target>/_doc/<_id> request format, you must + * have the create, index, or write index + * privilege.
  • + *
  • To add a document using the POST /<target>/_doc/ + * request format, you must have the create_doc, + * create, index, or write index + * privilege.
  • + *
  • To automatically create a data stream or index with this API request, you + * must have the auto_configure, create_index, or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * NOTE: Replica shards might not all be started when an indexing operation + * returns successfully. By default, only the primary is required. Set + * wait_for_active_shards to change this default behavior. + *

+ * Automatically create data streams and indices + *

+ * If the request's target doesn't exist and matches an index template with a + * data_stream definition, the index operation automatically + * creates the data stream. + *

+ * If the target doesn't exist and doesn't match a data stream template, the + * operation automatically creates the index and applies any matching index + * templates. + *

+ * NOTE: Elasticsearch includes several built-in index templates. To avoid + * naming collisions with these templates, refer to index pattern documentation. + *

+ * If no mapping exists, the index operation creates a dynamic mapping. By + * default, new fields and objects are automatically added to the mapping if + * needed. + *

+ * Automatic index creation is controlled by the + * action.auto_create_index setting. If it is true, + * any index can be created automatically. You can modify this setting to + * explicitly allow or block automatic creation of indices that match specified + * patterns or set it to false to turn off automatic index creation + * entirely. Specify a comma-separated list of patterns you want to allow or + * prefix each pattern with + or - to indicate whether + * it should be allowed or blocked. When a list is specified, the default + * behaviour is to disallow. + *

+ * NOTE: The action.auto_create_index setting affects the automatic + * creation of indices only. It does not affect the creation of data streams. + *

+ * Optimistic concurrency control + *

+ * Index operations can be made conditional and only be performed if the last + * modification to the document was assigned the sequence number and primary + * term specified by the if_seq_no and if_primary_term + * parameters. If a mismatch is detected, the operation will result in a + * VersionConflictException and a status code of 409. + *

+ * Routing + *

+ * By default, shard placement — or routing — is controlled by using a hash of + * the document's ID value. For more explicit control, the value fed into the + * hash function used by the router can be directly specified on a per-operation + * basis using the routing parameter. + *

+ * When setting up explicit mapping, you can also use the _routing + * field to direct the index operation to extract the routing value from the + * document itself. This does come at the (very minimal) cost of an additional + * document parsing pass. If the _routing mapping is defined and + * set to be required, the index operation will fail if no routing value is + * provided or extracted. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Distributed + *

+ * The index operation is directed to the primary shard based on its route and + * performed on the actual node containing this shard. After the primary shard + * completes the operation, if needed, the update is distributed to applicable + * replicas. + *

+ * Active shards + *

+ * To improve the resiliency of writes to the system, indexing operations can be + * configured to wait for a certain number of active shard copies before + * proceeding with the operation. If the requisite number of active shard copies + * are not available, then the write operation must wait and retry, until either + * the requisite shard copies have started or a timeout occurs. By default, + * write operations only wait for the primary shards to be active before + * proceeding (that is to say wait_for_active_shards is + * 1). This default can be overridden in the index settings + * dynamically by setting index.write.wait_for_active_shards. To + * alter this behavior per operation, use the + * wait_for_active_shards request parameter. + *

+ * Valid values are all or any positive integer up to the total number of + * configured copies per shard in the index (which is + * number_of_replicas+1). Specifying a negative value or a number + * greater than the number of shard copies will throw an error. + *

+ * For example, suppose you have a cluster of three nodes, A, B, and C and you + * create an index index with the number of replicas set to 3 (resulting in 4 + * shard copies, one more copy than there are nodes). If you attempt an indexing + * operation, by default the operation will only ensure the primary copy of each + * shard is available before proceeding. This means that even if B and C went + * down and A hosted the primary shard copies, the indexing operation would + * still proceed with only one copy of the data. If + * wait_for_active_shards is set on the request to 3 + * (and all three nodes are up), the indexing operation will require 3 active + * shard copies before proceeding. This requirement should be met because there + * are 3 active nodes in the cluster, each one holding a copy of the shard. + * However, if you set wait_for_active_shards to all + * (or to 4, which is the same in this situation), the indexing + * operation will not proceed as you do not have all 4 copies of each shard + * active in the index. The operation will timeout unless a new node is brought + * up in the cluster to host the fourth copy of the shard. + *

+ * It is important to note that this setting greatly reduces the chances of the + * write operation not writing to the requisite number of shard copies, but it + * does not completely eliminate the possibility, because this check occurs + * before the write operation starts. After the write operation is underway, it + * is still possible for replication to fail on any number of shard copies but + * still succeed on the primary. The _shards section of the API + * response reveals the number of shard copies on which replication succeeded + * and failed. + *

+ * No operation (noop) updates + *

+ * When updating a document by using this API, a new version of the document is + * always created even if the document hasn't changed. If this isn't acceptable, + * use the _update API with detect_noop set to + * true. The detect_noop option isn't available on + * this API because it doesn’t fetch the old source and isn't able to compare it + * against the new source. + *

+ * There isn't a definitive rule for when noop updates aren't acceptable. It's a + * combination of lots of factors like how frequently your data source sends + * updates that are actually noops and how many queries per second Elasticsearch + * runs on the shard receiving the updates. + *

+ * Versioning + *

+ * Each indexed document is given a version number. By default, internal + * versioning is used that starts at 1 and increments with each update, deletes + * included. Optionally, the version number can be set to an external value (for + * example, if maintained in a database). To enable this functionality, + * version_type should be set to external. The value + * provided must be a numeric, long value greater than or equal to 0, and less + * than around 9.2e+18. + *

+ * NOTE: Versioning is completely real time, and is not affected by the near + * real time aspects of search operations. If no version is provided, the + * operation runs without any version checks. + *

+ * When using the external version type, the system checks to see if the version + * number passed to the index request is greater than the version of the + * currently stored document. If true, the document will be indexed and the new + * version number used. If the value provided is less than or equal to the + * stored document's version number, a version conflict will occur and the index + * operation will fail. For example: * + *

+	 * PUT my-index-000001/_doc/1?version=2&version_type=external
+	 * {
+	 *   "user": {
+	 *     "id": "elkbee"
+	 *   }
+	 * }
+	 *
+	 * In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
+	 * If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
+	 *
+	 * A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
+	 * Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link IndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create">Documentation * on elastic.co */ @@ -1342,7 +3216,7 @@ public final IndexResponse index( * Get cluster info. Get basic build, version, and cluster information. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info">Documentation * on elastic.co */ public InfoResponse info() throws IOException, ElasticsearchException { @@ -1368,9 +3242,21 @@ public InfoResponse info() throws IOException, ElasticsearchException { *

* The kNN search API supports restricting the search using a filter. The search * will return the top k documents that also match the filter query. - * + *

+ * A kNN search response has the exact same structure as a search API response. + * However, certain sections have a meaning specific to kNN search: + *

    + *
  • The document _score is determined by the similarity between + * the query and document vector.
  • + *
  • The hits.total object contains the total number of nearest + * neighbor candidates considered, which is + * num_candidates * num_shards. The + * hits.total.relation will always be eq, indicating + * an exact value.
  • + *
+ * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html">Documentation * on elastic.co */ @@ -1402,12 +3288,24 @@ public KnnSearchResponse knnSearch(KnnSearchRequest reque *

* The kNN search API supports restricting the search using a filter. The search * will return the top k documents that also match the filter query. - * + *

+ * A kNN search response has the exact same structure as a search API response. + * However, certain sections have a meaning specific to kNN search: + *

    + *
  • The document _score is determined by the similarity between + * the query and document vector.
  • + *
  • The hits.total object contains the total number of nearest + * neighbor candidates considered, which is + * num_candidates * num_shards. The + * hits.total.relation will always be eq, indicating + * an exact value.
  • + *
+ * * @param fn * a function that initializes a builder to create the * {@link KnnSearchRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html">Documentation * on elastic.co */ @@ -1434,9 +3332,21 @@ public final KnnSearchResponse knnSearch( *

* The kNN search API supports restricting the search using a filter. The search * will return the top k documents that also match the filter query. - * + *

+ * A kNN search response has the exact same structure as a search API response. + * However, certain sections have a meaning specific to kNN search: + *

    + *
  • The document _score is determined by the similarity between + * the query and document vector.
  • + *
  • The hits.total object contains the total number of nearest + * neighbor candidates considered, which is + * num_candidates * num_shards. The + * hits.total.relation will always be eq, indicating + * an exact value.
  • + *
+ * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html">Documentation * on elastic.co */ @@ -1468,12 +3378,24 @@ public KnnSearchResponse knnSearch(KnnSearchRequest reque *

* The kNN search API supports restricting the search using a filter. The search * will return the top k documents that also match the filter query. - * + *

+ * A kNN search response has the exact same structure as a search API response. + * However, certain sections have a meaning specific to kNN search: + *

    + *
  • The document _score is determined by the similarity between + * the query and document vector.
  • + *
  • The hits.total object contains the total number of nearest + * neighbor candidates considered, which is + * num_candidates * num_shards. The + * hits.total.relation will always be eq, indicating + * an exact value.
  • + *
+ * * @param fn * a function that initializes a builder to create the * {@link KnnSearchRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html">Documentation * on elastic.co */ @@ -1492,9 +3414,27 @@ public final KnnSearchResponse knnSearch( * index in the request URI, you only need to specify the document IDs in the * request body. To ensure fast responses, this multi get (mget) API responds * with partial results if one or more shards fail. + *

+ * Filter source fields + *

+ * By default, the _source field is returned for every document (if + * stored). Use the _source and _source_include or + * source_exclude attributes to filter what fields are returned for + * a particular document. You can include the _source, + * _source_includes, and _source_excludes query + * parameters in the request URI to specify the defaults to use when there are + * no per-document instructions. + *

+ * Get stored fields + *

+ * Use the stored_fields attribute to specify the set of stored + * fields you want to retrieve. Any requested fields that are not stored are + * ignored. You can include the stored_fields query parameter in + * the request URI to specify the defaults to use when there are no per-document + * instructions. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget">Documentation * on elastic.co */ @@ -1515,12 +3455,30 @@ public MgetResponse mget(MgetRequest request, Class + * Filter source fields + *

+ * By default, the _source field is returned for every document (if + * stored). Use the _source and _source_include or + * source_exclude attributes to filter what fields are returned for + * a particular document. You can include the _source, + * _source_includes, and _source_excludes query + * parameters in the request URI to specify the defaults to use when there are + * no per-document instructions. + *

+ * Get stored fields + *

+ * Use the stored_fields attribute to specify the set of stored + * fields you want to retrieve. Any requested fields that are not stored are + * ignored. You can include the stored_fields query parameter in + * the request URI to specify the defaults to use when there are no per-document + * instructions. * * @param fn * a function that initializes a builder to create the * {@link MgetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget">Documentation * on elastic.co */ @@ -1536,9 +3494,27 @@ public final MgetResponse mget(Function + * Filter source fields + *

+ * By default, the _source field is returned for every document (if + * stored). Use the _source and _source_include or + * source_exclude attributes to filter what fields are returned for + * a particular document. You can include the _source, + * _source_includes, and _source_excludes query + * parameters in the request URI to specify the defaults to use when there are + * no per-document instructions. + *

+ * Get stored fields + *

+ * Use the stored_fields attribute to specify the set of stored + * fields you want to retrieve. Any requested fields that are not stored are + * ignored. You can include the stored_fields query parameter in + * the request URI to specify the defaults to use when there are no per-document + * instructions. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget">Documentation * on elastic.co */ @@ -1559,12 +3535,30 @@ public MgetResponse mget(MgetRequest request, Type tDocum * index in the request URI, you only need to specify the document IDs in the * request body. To ensure fast responses, this multi get (mget) API responds * with partial results if one or more shards fail. + *

+ * Filter source fields + *

+ * By default, the _source field is returned for every document (if + * stored). Use the _source and _source_include or + * source_exclude attributes to filter what fields are returned for + * a particular document. You can include the _source, + * _source_includes, and _source_excludes query + * parameters in the request URI to specify the defaults to use when there are + * no per-document instructions. + *

+ * Get stored fields + *

+ * Use the stored_fields attribute to specify the set of stored + * fields you want to retrieve. Any requested fields that are not stored are + * ignored. You can include the stored_fields query parameter in + * the request URI to specify the defaults to use when there are no per-document + * instructions. * * @param fn * a function that initializes a builder to create the * {@link MgetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget">Documentation * on elastic.co */ @@ -1599,7 +3593,7 @@ public final MgetResponse mget(Functionapplication/x-ndjson. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch">Documentation * on elastic.co */ @@ -1640,7 +3634,7 @@ public MsearchResponse msearch(MsearchRequest request, Cl * a function that initializes a builder to create the * {@link MsearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch">Documentation * on elastic.co */ @@ -1674,7 +3668,7 @@ public final MsearchResponse msearch( * application/x-ndjson. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch">Documentation * on elastic.co */ @@ -1715,7 +3709,7 @@ public MsearchResponse msearch(MsearchRequest request, Ty * a function that initializes a builder to create the * {@link MsearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch">Documentation * on elastic.co */ @@ -1729,9 +3723,25 @@ public final MsearchResponse msearch( /** * Run multiple templated searches. + *

+ * Run multiple templated searches with a single request. If you are providing a + * text file or text input to curl, use the + * --data-binary flag instead of -d to preserve + * newlines. For example: * + *

+	 * $ cat requests
+	 * { "index": "my-index" }
+	 * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
+	 * { "index": "my-other-index" }
+	 * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
+	 *
+	 * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
+	 * 
+	 * 
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template">Documentation * on elastic.co */ @@ -1748,12 +3758,28 @@ public MsearchTemplateResponse msearchTemplate(MsearchTem /** * Run multiple templated searches. + *

+ * Run multiple templated searches with a single request. If you are providing a + * text file or text input to curl, use the + * --data-binary flag instead of -d to preserve + * newlines. For example: * + *

+	 * $ cat requests
+	 * { "index": "my-index" }
+	 * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
+	 * { "index": "my-other-index" }
+	 * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
+	 *
+	 * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link MsearchTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template">Documentation * on elastic.co */ @@ -1765,9 +3791,25 @@ public final MsearchTemplateResponse msearchTemplate( /** * Run multiple templated searches. + *

+ * Run multiple templated searches with a single request. If you are providing a + * text file or text input to curl, use the + * --data-binary flag instead of -d to preserve + * newlines. For example: * + *

+	 * $ cat requests
+	 * { "index": "my-index" }
+	 * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
+	 * { "index": "my-other-index" }
+	 * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
+	 *
+	 * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
+	 * 
+	 * 
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template">Documentation * on elastic.co */ @@ -1784,12 +3826,28 @@ public MsearchTemplateResponse msearchTemplate(MsearchTem /** * Run multiple templated searches. + *

+ * Run multiple templated searches with a single request. If you are providing a + * text file or text input to curl, use the + * --data-binary flag instead of -d to preserve + * newlines. For example: * + *

+	 * $ cat requests
+	 * { "index": "my-index" }
+	 * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
+	 * { "index": "my-other-index" }
+	 * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
+	 *
+	 * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link MsearchTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template">Documentation * on elastic.co */ @@ -1804,14 +3862,20 @@ public final MsearchTemplateResponse msearchTemplate( /** * Get multiple term vectors. *

- * You can specify existing documents by index and ID or provide artificial - * documents in the body of the request. You can specify the index in the - * request body or request URI. The response contains a docs array - * with all the fetched termvectors. Each element has the structure provided by - * the termvectors API. + * Get multiple term vectors with a single request. You can specify existing + * documents by index and ID or provide artificial documents in the body of the + * request. You can specify the index in the request body or request URI. The + * response contains a docs array with all the fetched termvectors. + * Each element has the structure provided by the termvectors API. + *

+ * Artificial documents + *

+ * You can also use mtermvectors to generate term vectors for + * artificial documents provided in the body of the request. The mapping used is + * determined by the specified _index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors">Documentation * on elastic.co */ @@ -1825,17 +3889,23 @@ public MtermvectorsResponse mtermvectors(MtermvectorsRequest request) throws IOE /** * Get multiple term vectors. *

- * You can specify existing documents by index and ID or provide artificial - * documents in the body of the request. You can specify the index in the - * request body or request URI. The response contains a docs array - * with all the fetched termvectors. Each element has the structure provided by - * the termvectors API. + * Get multiple term vectors with a single request. You can specify existing + * documents by index and ID or provide artificial documents in the body of the + * request. You can specify the index in the request body or request URI. The + * response contains a docs array with all the fetched termvectors. + * Each element has the structure provided by the termvectors API. + *

+ * Artificial documents + *

+ * You can also use mtermvectors to generate term vectors for + * artificial documents provided in the body of the request. The mapping used is + * determined by the specified _index. * * @param fn * a function that initializes a builder to create the * {@link MtermvectorsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors">Documentation * on elastic.co */ @@ -1848,14 +3918,20 @@ public final MtermvectorsResponse mtermvectors( /** * Get multiple term vectors. *

- * You can specify existing documents by index and ID or provide artificial - * documents in the body of the request. You can specify the index in the - * request body or request URI. The response contains a docs array - * with all the fetched termvectors. Each element has the structure provided by - * the termvectors API. + * Get multiple term vectors with a single request. You can specify existing + * documents by index and ID or provide artificial documents in the body of the + * request. You can specify the index in the request body or request URI. The + * response contains a docs array with all the fetched termvectors. + * Each element has the structure provided by the termvectors API. + *

+ * Artificial documents + *

+ * You can also use mtermvectors to generate term vectors for + * artificial documents provided in the body of the request. The mapping used is + * determined by the specified _index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors">Documentation * on elastic.co */ @@ -1879,11 +3955,53 @@ public MtermvectorsResponse mtermvectors() throws IOException, ElasticsearchExce * the more recent point in time. *

* A point in time must be opened explicitly before being used in search - * requests. The keep_alive parameter tells Elasticsearch how long - * it should persist. + * requests. + *

+ * A subsequent search request with the pit parameter must not + * specify index, routing, or preference + * values as these parameters are copied from the point in time. + *

+ * Just like regular searches, you can use from and + * size to page through point in time search results, up to the + * first 10,000 hits. If you want to retrieve more hits, use PIT with + * search_after. + *

+ * IMPORTANT: The open point in time request and each subsequent search request + * can return different identifiers; always use the most recently received ID + * for the next search request. + *

+ * When a PIT that contains shard failures is used in a search request, the + * missing are always reported in the search response as a + * NoShardAvailableActionException exception. To get rid of these + * exceptions, a new PIT needs to be created so that shards missing from the + * previous PIT can be handled, assuming they become available in the meantime. + *

+ * Keeping point in time alive + *

+ * The keep_alive parameter, which is passed to a open point in + * time request and search request, extends the time to live of the + * corresponding point in time. The value does not need to be long enough to + * process all data — it just needs to be long enough for the next request. + *

+ * Normally, the background merge process optimizes the index by merging + * together smaller segments to create new, bigger segments. Once the smaller + * segments are no longer needed they are deleted. However, open point-in-times + * prevent the old segments from being deleted since they are still in use. + *

+ * TIP: Keeping older segments alive means that more disk space and file handles + * are needed. Ensure that you have configured your nodes to have ample free + * file handles. + *

+ * Additionally, if a segment contains deleted or updated documents then the + * point in time must keep track of whether each document in the segment was + * live at the time of the initial search request. Ensure that your nodes have + * sufficient heap space if you have many open point-in-times on an index that + * is subject to ongoing deletes or updates. Note that a point-in-time doesn't + * prevent its associated indices from being deleted. You can check how many + * point-in-times (that is, search contexts) are open with the nodes stats API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time">Documentation * on elastic.co */ @@ -1908,14 +4026,56 @@ public OpenPointInTimeResponse openPointInTime(OpenPointInTimeRequest request) * the more recent point in time. *

* A point in time must be opened explicitly before being used in search - * requests. The keep_alive parameter tells Elasticsearch how long - * it should persist. + * requests. + *

+ * A subsequent search request with the pit parameter must not + * specify index, routing, or preference + * values as these parameters are copied from the point in time. + *

+ * Just like regular searches, you can use from and + * size to page through point in time search results, up to the + * first 10,000 hits. If you want to retrieve more hits, use PIT with + * search_after. + *

+ * IMPORTANT: The open point in time request and each subsequent search request + * can return different identifiers; always use the most recently received ID + * for the next search request. + *

+ * When a PIT that contains shard failures is used in a search request, the + * missing are always reported in the search response as a + * NoShardAvailableActionException exception. To get rid of these + * exceptions, a new PIT needs to be created so that shards missing from the + * previous PIT can be handled, assuming they become available in the meantime. + *

+ * Keeping point in time alive + *

+ * The keep_alive parameter, which is passed to a open point in + * time request and search request, extends the time to live of the + * corresponding point in time. The value does not need to be long enough to + * process all data — it just needs to be long enough for the next request. + *

+ * Normally, the background merge process optimizes the index by merging + * together smaller segments to create new, bigger segments. Once the smaller + * segments are no longer needed they are deleted. However, open point-in-times + * prevent the old segments from being deleted since they are still in use. + *

+ * TIP: Keeping older segments alive means that more disk space and file handles + * are needed. Ensure that you have configured your nodes to have ample free + * file handles. + *

+ * Additionally, if a segment contains deleted or updated documents then the + * point in time must keep track of whether each document in the segment was + * live at the time of the initial search request. Ensure that your nodes have + * sufficient heap space if you have many open point-in-times on an index that + * is subject to ongoing deletes or updates. Note that a point-in-time doesn't + * prevent its associated indices from being deleted. You can check how many + * point-in-times (that is, search contexts) are open with the nodes stats API. * * @param fn * a function that initializes a builder to create the * {@link OpenPointInTimeRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time">Documentation * on elastic.co */ @@ -1931,7 +4091,7 @@ public final OpenPointInTimeResponse openPointInTime( * Ping the cluster. Get information about whether the cluster is running. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster">Documentation * on elastic.co */ public BooleanResponse ping() throws IOException, ElasticsearchException { @@ -1945,7 +4105,7 @@ public BooleanResponse ping() throws IOException, ElasticsearchException { * script or search template. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script">Documentation * on elastic.co */ @@ -1964,7 +4124,7 @@ public PutScriptResponse putScript(PutScriptRequest request) throws IOException, * a function that initializes a builder to create the * {@link PutScriptRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script">Documentation * on elastic.co */ @@ -1982,7 +4142,7 @@ public final PutScriptResponse putScript(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval">Documentation * on elastic.co */ @@ -1996,32 +4156,296 @@ public RankEvalResponse rankEval(RankEvalRequest request) throws IOException, El /** * Evaluate ranked search results. *

- * Evaluate the quality of ranked search results over a set of typical search - * queries. + * Evaluate the quality of ranked search results over a set of typical search + * queries. + * + * @param fn + * a function that initializes a builder to create the + * {@link RankEvalRequest} + * @see Documentation + * on elastic.co + */ + + public final RankEvalResponse rankEval(Function> fn) + throws IOException, ElasticsearchException { + return rankEval(fn.apply(new RankEvalRequest.Builder()).build()); + } + + // ----- Endpoint: reindex + + /** + * Reindex documents. + *

+ * Copy documents from a source to a destination. You can copy all documents to + * the destination index or reindex a subset of the documents. The source can be + * any existing index, alias, or data stream. The destination must differ from + * the source. For example, you cannot reindex a data stream into itself. + *

+ * IMPORTANT: Reindex requires _source to be enabled for all + * documents in the source. The destination should be configured as wanted + * before calling the reindex API. Reindex does not copy the settings from the + * source or its associated template. Mappings, shard counts, and replicas, for + * example, must be configured ahead of time. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following security privileges: + *

    + *
  • The read index privilege for the source data stream, index, + * or alias.
  • + *
  • The write index privilege for the destination data stream, + * index, or index alias.
  • + *
  • To automatically create a data stream or index with a reindex API + * request, you must have the auto_configure, + * create_index, or manage index privilege for the + * destination data stream, index, or alias.
  • + *
  • If reindexing from a remote cluster, the source.remote.user + * must have the monitor cluster privilege and the + * read index privilege for the source data stream, index, or + * alias.
  • + *
+ *

+ * If reindexing from a remote cluster, you must explicitly allow the remote + * host in the reindex.remote.whitelist setting. Automatic data + * stream creation requires a matching index template with data stream enabled. + *

+ * The dest element can be configured like the index API to control + * optimistic concurrency control. Omitting version_type or setting + * it to internal causes Elasticsearch to blindly dump documents + * into the destination, overwriting any that happen to have the same ID. + *

+ * Setting version_type to external causes + * Elasticsearch to preserve the version from the source, create + * any documents that are missing, and update any documents that have an older + * version in the destination than they do in the source. + *

+ * Setting op_type to create causes the reindex API to + * create only missing documents in the destination. All existing documents will + * cause a version conflict. + *

+ * IMPORTANT: Because data streams are append-only, any reindex request to a + * destination data stream must have an op_type of + * create. A reindex can only add new documents to a destination + * data stream. It cannot update existing documents in a destination data + * stream. + *

+ * By default, version conflicts abort the reindex process. To continue + * reindexing if there are conflicts, set the conflicts request + * body property to proceed. In this case, the response includes a + * count of the version conflicts that were encountered. Note that the handling + * of other error types is unaffected by the conflicts property. + * Additionally, if you opt to count version conflicts, the operation could + * attempt to reindex more documents from the source than max_docs + * until it has successfully indexed max_docs documents into the + * target or it has gone through every document in the source query. + *

+ * NOTE: The reindex API makes no effort to handle ID collisions. The last + * document written will "win" but the order isn't usually predictable + * so it is not a good idea to rely on this behavior. Instead, make sure that + * IDs are unique by using a script. + *

+ * Running reindex asynchronously + *

+ * If the request contains wait_for_completion=false, Elasticsearch + * performs some preflight checks, launches the request, and returns a task you + * can use to cancel or get the status of the task. Elasticsearch creates a + * record of this task as a document at _tasks/<task_id>. + *

+ * Reindex from multiple sources + *

+ * If you have many sources to reindex it is generally better to reindex them + * one at a time rather than using a glob pattern to pick up multiple sources. + * That way you can resume the process if there are any errors by removing the + * partially completed source and starting over. It also makes parallelizing the + * process fairly simple: split the list of sources to reindex and run each list + * in parallel. + *

+ * For example, you can use a bash script like this: + * + *

+	 * for index in i1 i2 i3 i4 i5; do
+	 *   curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
+	 *     "source": {
+	 *       "index": "'$index'"
+	 *     },
+	 *     "dest": {
+	 *       "index": "'$index'-reindexed"
+	 *     }
+	 *   }'
+	 * done
+	 * 
+	 * 
+ *

+ * Throttling + *

+ * Set requests_per_second to any positive decimal number + * (1.4, 6, 1000, for example) to + * throttle the rate at which reindex issues batches of index operations. + * Requests are throttled by padding each batch with a wait time. To turn off + * throttling, set requests_per_second to -1. + *

+ * The throttling is done by waiting between batches so that the scroll that + * reindex uses internally can be given a timeout that takes into account the + * padding. The padding time is the difference between the batch size divided by + * the requests_per_second and the time spent writing. By default + * the batch size is 1000, so if requests_per_second + * is set to 500: + * + *

+	 * target_time = 1000 / 500 per second = 2 seconds
+	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+	 * 
+	 * 
+ *

+ * Since the batch is issued as a single bulk request, large batch sizes cause + * Elasticsearch to create many requests and then wait for a while before + * starting the next set. This is "bursty" instead of + * "smooth". + *

+ * Slicing + *

+ * Reindex supports sliced scroll to parallelize the reindexing process. This + * parallelization can improve efficiency and provide a convenient way to break + * the request down into smaller parts. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * You can slice a reindex request manually by providing a slice ID and total + * number of slices to each request. You can also let reindex automatically + * parallelize by using sliced scroll to slice on _id. The + * slices parameter specifies the number of slices to use. + *

+ * Adding slices to the reindex request just automates the manual + * process, creating sub-requests which means it has some quirks: + *

    + *
  • You can see these requests in the tasks API. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices + * only contains the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each + * sub-request.
  • + *
  • Due to the nature of slices, each sub-request won't get a + * perfectly even portion of the documents. All documents will be addressed, but + * some slices may be larger than others. Expect larger slices to have a more + * even distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed + * proportionally to each sub-request. Combine that with the previous point + * about distribution being uneven and you should conclude that using + * max_docs with slices might not result in exactly + * max_docs documents being reindexed.
  • + *
  • Each sub-request gets a slightly different snapshot of the source, though + * these are all taken at approximately the same time.
  • + *
+ *

+ * If slicing automatically, setting slices to auto + * will choose a reasonable number for most indices. If slicing manually or + * otherwise tuning automatic slicing, use the following guidelines. + *

+ * Query performance is most efficient when the number of slices is equal to the + * number of shards in the index. If that number is large (for example, + * 500), choose a lower number as too many slices will hurt + * performance. Setting slices higher than the number of shards generally does + * not improve efficiency and adds overhead. + *

+ * Indexing performance scales linearly across available resources with the + * number of slices. + *

+ * Whether query or indexing performance dominates the runtime depends on the + * documents being reindexed and cluster resources. + *

+ * Modify documents during reindexing + *

+ * Like _update_by_query, reindex operations support a script that + * modifies the document. Unlike _update_by_query, the script is + * allowed to modify the document's metadata. + *

+ * Just as in _update_by_query, you can set ctx.op to + * change the operation that is run on the destination. For example, set + * ctx.op to noop if your script decides that the + * document doesn’t have to be indexed in the destination. This "no + * operation" will be reported in the noop counter in the + * response body. Set ctx.op to delete if your script + * decides that the document must be deleted from the destination. The deletion + * will be reported in the deleted counter in the response body. + * Setting ctx.op to anything else will return an error, as will + * setting any other field in ctx. + *

+ * Think of the possibilities! Just be careful; you are able to change: + *

    + *
  • _id
  • + *
  • _index
  • + *
  • _version
  • + *
  • _routing
  • + *
+ *

+ * Setting _version to null or clearing it from the + * ctx map is just like not sending the version in an indexing + * request. It will cause the document to be overwritten in the destination + * regardless of the version on the target or the version type you use in the + * reindex API. + *

+ * Reindex from remote + *

+ * Reindex supports reindexing from a remote Elasticsearch cluster. The + * host parameter must contain a scheme, host, port, and optional + * path. The username and password parameters are + * optional and when they are present the reindex operation will connect to the + * remote Elasticsearch node using basic authentication. Be sure to use HTTPS + * when using basic authentication or the password will be sent in plain text. + * There are a range of settings available to configure the behavior of the + * HTTPS connection. + *

+ * When using Elastic Cloud, it is also possible to authenticate against the + * remote cluster through the use of a valid API key. Remote hosts must be + * explicitly allowed with the reindex.remote.whitelist setting. It + * can be set to a comma delimited list of allowed remote host and port + * combinations. Scheme is ignored; only the host and port are used. For + * example: * - * @param fn - * a function that initializes a builder to create the - * {@link RankEvalRequest} - * @see Documentation - * on elastic.co - */ - - public final RankEvalResponse rankEval(Function> fn) - throws IOException, ElasticsearchException { - return rankEval(fn.apply(new RankEvalRequest.Builder()).build()); - } - - // ----- Endpoint: reindex - - /** - * Reindex documents. Copies documents from a source to a destination. The - * source can be any existing index, alias, or data stream. The destination must - * differ from the source. For example, you cannot reindex a data stream into - * itself. + *

+	 * reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
+	 * 
+	 * 
+ *

+ * The list of allowed hosts must be configured on any nodes that will + * coordinate the reindex. This feature should work with remote clusters of any + * version of Elasticsearch. This should enable you to upgrade from any version + * of Elasticsearch to the current version by reindexing from a cluster of the + * old version. + *

+ * WARNING: Elasticsearch does not support forward compatibility across major + * versions. For example, you cannot reindex from a 7.x cluster into a 6.x + * cluster. + *

+ * To enable queries sent to older versions of Elasticsearch, the + * query parameter is sent directly to the remote host without + * validation or modification. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * Reindexing from a remote server uses an on-heap buffer that defaults to a + * maximum size of 100mb. If the remote index includes very large documents + * you'll need to use a smaller batch size. It is also possible to set the + * socket read timeout on the remote connection with the + * socket_timeout field and the connection timeout with the + * connect_timeout field. Both default to 30 seconds. + *

+ * Configuring SSL parameters + *

+ * Reindex from remote supports configurable SSL settings. These must be + * specified in the elasticsearch.yml file, with the exception of + * the secure settings, which you add in the Elasticsearch keystore. It is not + * possible to configure SSL in the body of the reindex request. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex">Documentation * on elastic.co */ @@ -2033,16 +4457,280 @@ public ReindexResponse reindex(ReindexRequest request) throws IOException, Elast } /** - * Reindex documents. Copies documents from a source to a destination. The - * source can be any existing index, alias, or data stream. The destination must - * differ from the source. For example, you cannot reindex a data stream into - * itself. + * Reindex documents. + *

+ * Copy documents from a source to a destination. You can copy all documents to + * the destination index or reindex a subset of the documents. The source can be + * any existing index, alias, or data stream. The destination must differ from + * the source. For example, you cannot reindex a data stream into itself. + *

+ * IMPORTANT: Reindex requires _source to be enabled for all + * documents in the source. The destination should be configured as wanted + * before calling the reindex API. Reindex does not copy the settings from the + * source or its associated template. Mappings, shard counts, and replicas, for + * example, must be configured ahead of time. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following security privileges: + *

+	 * <ul>
+	 * <li>The <code>read</code> index privilege for the source data stream, index,
+	 * or alias.</li>
+	 * <li>The <code>write</code> index privilege for the destination data stream,
+	 * index, or index alias.</li>
+	 * <li>To automatically create a data stream or index with a reindex API
+	 * request, you must have the <code>auto_configure</code>,
+	 * <code>create_index</code>, or <code>manage</code> index privilege for the
+	 * destination data stream, index, or alias.</li>
+	 * <li>If reindexing from a remote cluster, the <code>source.remote.user</code>
+	 * must have the <code>monitor</code> cluster privilege and the
+	 * <code>read</code> index privilege for the source data stream, index, or
+	 * alias.</li>
+	 * </ul>

+ * If reindexing from a remote cluster, you must explicitly allow the remote + * host in the reindex.remote.whitelist setting. Automatic data + * stream creation requires a matching index template with data stream enabled. + *

+ * The dest element can be configured like the index API to control + * optimistic concurrency control. Omitting version_type or setting + * it to internal causes Elasticsearch to blindly dump documents + * into the destination, overwriting any that happen to have the same ID. + *

+ * Setting version_type to external causes + * Elasticsearch to preserve the version from the source, create + * any documents that are missing, and update any documents that have an older + * version in the destination than they do in the source. + *

+ * Setting op_type to create causes the reindex API to + * create only missing documents in the destination. All existing documents will + * cause a version conflict. + *

+ * IMPORTANT: Because data streams are append-only, any reindex request to a + * destination data stream must have an op_type of + * create. A reindex can only add new documents to a destination + * data stream. It cannot update existing documents in a destination data + * stream. + *

+ * By default, version conflicts abort the reindex process. To continue + * reindexing if there are conflicts, set the conflicts request + * body property to proceed. In this case, the response includes a + * count of the version conflicts that were encountered. Note that the handling + * of other error types is unaffected by the conflicts property. + * Additionally, if you opt to count version conflicts, the operation could + * attempt to reindex more documents from the source than max_docs + * until it has successfully indexed max_docs documents into the + * target or it has gone through every document in the source query. + *

+ * NOTE: The reindex API makes no effort to handle ID collisions. The last + * document written will "win" but the order isn't usually predictable + * so it is not a good idea to rely on this behavior. Instead, make sure that + * IDs are unique by using a script. + *

+ * Running reindex asynchronously + *

+ * If the request contains wait_for_completion=false, Elasticsearch + * performs some preflight checks, launches the request, and returns a task you + * can use to cancel or get the status of the task. Elasticsearch creates a + * record of this task as a document at _tasks/<task_id>. + *

+ * Reindex from multiple sources + *

+ * If you have many sources to reindex it is generally better to reindex them + * one at a time rather than using a glob pattern to pick up multiple sources. + * That way you can resume the process if there are any errors by removing the + * partially completed source and starting over. It also makes parallelizing the + * process fairly simple: split the list of sources to reindex and run each list + * in parallel. + *

+ * For example, you can use a bash script like this: + * + *

+	 * for index in i1 i2 i3 i4 i5; do
+	 *   curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
+	 *     "source": {
+	 *       "index": "'$index'"
+	 *     },
+	 *     "dest": {
+	 *       "index": "'$index'-reindexed"
+	 *     }
+	 *   }'
+	 * done
+	 * 
+	 * 
+ *

+ * Throttling + *

+ * Set requests_per_second to any positive decimal number + * (1.4, 6, 1000, for example) to + * throttle the rate at which reindex issues batches of index operations. + * Requests are throttled by padding each batch with a wait time. To turn off + * throttling, set requests_per_second to -1. + *

+ * The throttling is done by waiting between batches so that the scroll that + * reindex uses internally can be given a timeout that takes into account the + * padding. The padding time is the difference between the batch size divided by + * the requests_per_second and the time spent writing. By default + * the batch size is 1000, so if requests_per_second + * is set to 500: + * + *

+	 * target_time = 1000 / 500 per second = 2 seconds
+	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+	 * 
+	 * 
+ *

+ * Since the batch is issued as a single bulk request, large batch sizes cause + * Elasticsearch to create many requests and then wait for a while before + * starting the next set. This is "bursty" instead of + * "smooth". + *

+ * Slicing + *

+ * Reindex supports sliced scroll to parallelize the reindexing process. This + * parallelization can improve efficiency and provide a convenient way to break + * the request down into smaller parts. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * You can slice a reindex request manually by providing a slice ID and total + * number of slices to each request. You can also let reindex automatically + * parallelize by using sliced scroll to slice on _id. The + * slices parameter specifies the number of slices to use. + *

+ * Adding slices to the reindex request just automates the manual + * process, creating sub-requests which means it has some quirks: + *

+	 * <ul>
+	 * <li>You can see these requests in the tasks API. These sub-requests are
+	 * &quot;child&quot; tasks of the task for the request with <code>slices</code>.</li>
+	 * <li>Fetching the status of the task for the request with <code>slices</code>
+	 * only contains the status of completed slices.</li>
+	 * <li>These sub-requests are individually addressable for things like
+	 * cancellation and rethrottling.</li>
+	 * <li>Rethrottling the request with <code>slices</code> will rethrottle the
+	 * unfinished sub-request proportionally.</li>
+	 * <li>Canceling the request with <code>slices</code> will cancel each
+	 * sub-request.</li>
+	 * <li>Due to the nature of <code>slices</code>, each sub-request won't get a
+	 * perfectly even portion of the documents. All documents will be addressed, but
+	 * some slices may be larger than others. Expect larger slices to have a more
+	 * even distribution.</li>
+	 * <li>Parameters like <code>requests_per_second</code> and
+	 * <code>max_docs</code> on a request with <code>slices</code> are distributed
+	 * proportionally to each sub-request. Combine that with the previous point
+	 * about distribution being uneven and you should conclude that using
+	 * <code>max_docs</code> with <code>slices</code> might not result in exactly
+	 * <code>max_docs</code> documents being reindexed.</li>
+	 * <li>Each sub-request gets a slightly different snapshot of the source, though
+	 * these are all taken at approximately the same time.</li>
+	 * </ul>

+ * If slicing automatically, setting slices to auto + * will choose a reasonable number for most indices. If slicing manually or + * otherwise tuning automatic slicing, use the following guidelines. + *

+ * Query performance is most efficient when the number of slices is equal to the + * number of shards in the index. If that number is large (for example, + * 500), choose a lower number as too many slices will hurt + * performance. Setting slices higher than the number of shards generally does + * not improve efficiency and adds overhead. + *

+ * Indexing performance scales linearly across available resources with the + * number of slices. + *

+ * Whether query or indexing performance dominates the runtime depends on the + * documents being reindexed and cluster resources. + *

+ * Modify documents during reindexing + *

+ * Like _update_by_query, reindex operations support a script that + * modifies the document. Unlike _update_by_query, the script is + * allowed to modify the document's metadata. + *

+ * Just as in _update_by_query, you can set ctx.op to + * change the operation that is run on the destination. For example, set + * ctx.op to noop if your script decides that the + * document doesn’t have to be indexed in the destination. This "no + * operation" will be reported in the noop counter in the + * response body. Set ctx.op to delete if your script + * decides that the document must be deleted from the destination. The deletion + * will be reported in the deleted counter in the response body. + * Setting ctx.op to anything else will return an error, as will + * setting any other field in ctx. + *

+ * Think of the possibilities! Just be careful; you are able to change: + *

+	 * <ul>
+	 * <li><code>_id</code></li>
+	 * <li><code>_index</code></li>
+	 * <li><code>_version</code></li>
+	 * <li><code>_routing</code></li>
+	 * </ul>

+ * Setting _version to null or clearing it from the + * ctx map is just like not sending the version in an indexing + * request. It will cause the document to be overwritten in the destination + * regardless of the version on the target or the version type you use in the + * reindex API. + *

+ * Reindex from remote + *

+ * Reindex supports reindexing from a remote Elasticsearch cluster. The + * host parameter must contain a scheme, host, port, and optional + * path. The username and password parameters are + * optional and when they are present the reindex operation will connect to the + * remote Elasticsearch node using basic authentication. Be sure to use HTTPS + * when using basic authentication or the password will be sent in plain text. + * There are a range of settings available to configure the behavior of the + * HTTPS connection. + *

+ * When using Elastic Cloud, it is also possible to authenticate against the + * remote cluster through the use of a valid API key. Remote hosts must be + * explicitly allowed with the reindex.remote.whitelist setting. It + * can be set to a comma delimited list of allowed remote host and port + * combinations. Scheme is ignored; only the host and port are used. For + * example: + * + *

+	 * reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
+	 * 
+	 * 
+ *

+ * The list of allowed hosts must be configured on any nodes that will + * coordinate the reindex. This feature should work with remote clusters of any + * version of Elasticsearch. This should enable you to upgrade from any version + * of Elasticsearch to the current version by reindexing from a cluster of the + * old version. + *

+ * WARNING: Elasticsearch does not support forward compatibility across major + * versions. For example, you cannot reindex from a 7.x cluster into a 6.x + * cluster. + *

+ * To enable queries sent to older versions of Elasticsearch, the + * query parameter is sent directly to the remote host without + * validation or modification. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * Reindexing from a remote server uses an on-heap buffer that defaults to a + * maximum size of 100mb. If the remote index includes very large documents + * you'll need to use a smaller batch size. It is also possible to set the + * socket read timeout on the remote connection with the + * socket_timeout field and the connection timeout with the + * connect_timeout field. Both default to 30 seconds. + *

+ * Configuring SSL parameters + *

+ * Reindex from remote supports configurable SSL settings. These must be + * specified in the elasticsearch.yml file, with the exception of + * the secure settings, which you add in the Elasticsearch keystore. It is not + * possible to configure SSL in the body of the reindex request. * * @param fn * a function that initializes a builder to create the * {@link ReindexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex">Documentation * on elastic.co */ @@ -2057,9 +4745,19 @@ public final ReindexResponse reindex(Function * Change the number of requests per second for a particular reindex operation. + * For example: + * + *

+	 * POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
+	 * 
+	 * 
+ *

+ * Rethrottling that speeds up the query takes effect immediately. Rethrottling + * that slows down the query will take effect after completing the current + * batch. This behavior prevents scroll timeouts. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex">Documentation * on elastic.co */ @@ -2075,12 +4773,22 @@ public ReindexRethrottleResponse reindexRethrottle(ReindexRethrottleRequest requ * Throttle a reindex operation. *

* Change the number of requests per second for a particular reindex operation. + * For example: + * + *

+	 * POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
+	 * 
+	 * 
+ *

+ * Rethrottling that speeds up the query takes effect immediately. Rethrottling + * that slows down the query will take effect after completing the current + * batch. This behavior prevents scroll timeouts. * * @param fn * a function that initializes a builder to create the * {@link ReindexRethrottleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex">Documentation * on elastic.co */ @@ -2098,7 +4806,7 @@ public final ReindexRethrottleResponse reindexRethrottle( * Render a search template as a search request body. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template">Documentation * on elastic.co */ @@ -2119,7 +4827,7 @@ public RenderSearchTemplateResponse renderSearchTemplate(RenderSearchTemplateReq * a function that initializes a builder to create the * {@link RenderSearchTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template">Documentation * on elastic.co */ @@ -2135,7 +4843,7 @@ public final RenderSearchTemplateResponse renderSearchTemplate( * Render a search template as a search request body. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template">Documentation * on elastic.co */ @@ -2147,10 +4855,21 @@ public RenderSearchTemplateResponse renderSearchTemplate() throws IOException, E // ----- Endpoint: scripts_painless_execute /** - * Run a script. Runs a script and returns a result. + * Run a script. + *

+ * Runs a script and returns a result. Use this API to build and test scripts, + * such as when defining a script for a runtime field. This API requires very + * few dependencies and is especially useful if you don't have permissions to + * write documents on a cluster. + *

+ * The API uses several contexts, which control how scripts are run, + * what variables are available at runtime, and what the return type is. + *

+ * Each context requires a script, but additional parameters depend on the + * context you're using for that script. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html">Documentation * on elastic.co */ @@ -2167,13 +4886,24 @@ public ScriptsPainlessExecuteResponse scriptsPainlessExecute( } /** - * Run a script. Runs a script and returns a result. + * Run a script. + *

+ * Runs a script and returns a result. Use this API to build and test scripts, + * such as when defining a script for a runtime field. This API requires very + * few dependencies and is especially useful if you don't have permissions to + * write documents on a cluster. + *

+ * The API uses several contexts, which control how scripts are run, + * what variables are available at runtime, and what the return type is. + *

+ * Each context requires a script, but additional parameters depend on the + * context you're using for that script. * * @param fn * a function that initializes a builder to create the * {@link ScriptsPainlessExecuteRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html">Documentation * on elastic.co */ @@ -2184,10 +4914,21 @@ public final ScriptsPainlessExecuteResponse scriptsPainlessEx } /** - * Run a script. Runs a script and returns a result. + * Run a script. + *

+ * Runs a script and returns a result. Use this API to build and test scripts, + * such as when defining a script for a runtime field. This API requires very + * few dependencies and is especially useful if you don't have permissions to + * write documents on a cluster. + *

+ * The API uses several contexts, which control how scripts are run, + * what variables are available at runtime, and what the return type is. + *

+ * Each context requires a script, but additional parameters depend on the + * context you're using for that script. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html">Documentation * on elastic.co */ @@ -2203,13 +4944,24 @@ public ScriptsPainlessExecuteResponse scriptsPainlessExecute( } /** - * Run a script. Runs a script and returns a result. + * Run a script. + *

+ * Runs a script and returns a result. Use this API to build and test scripts, + * such as when defining a script for a runtime field. This API requires very + * few dependencies and is especially useful if you don't have permissions to + * write documents on a cluster. + *

+ * The API uses several contexts, which control how scripts are run, + * what variables are available at runtime, and what the return type is. + *

+ * Each context requires a script, but additional parameters depend on the + * context you're using for that script. * * @param fn * a function that initializes a builder to create the * {@link ScriptsPainlessExecuteRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html">Documentation * on elastic.co */ @@ -2247,7 +4999,7 @@ public final ScriptsPainlessExecuteResponse scriptsPainlessEx * changes only affect later search and scroll requests. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll">Documentation * on elastic.co */ @@ -2290,7 +5042,7 @@ public ScrollResponse scroll(ScrollRequest request, Class * a function that initializes a builder to create the * {@link ScrollRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll">Documentation * on elastic.co */ @@ -2326,7 +5078,7 @@ public final ScrollResponse scroll( * changes only affect later search and scroll requests. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll">Documentation * on elastic.co */ @@ -2369,7 +5121,7 @@ public ScrollResponse scroll(ScrollRequest request, Type * a function that initializes a builder to create the * {@link ScrollRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll">Documentation * on elastic.co */ @@ -2387,9 +5139,32 @@ public final ScrollResponse scroll( * Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. + *

+ * If the Elasticsearch security features are enabled, you must have the read + * index privilege for the target data stream, index, or alias. For + * cross-cluster search, refer to the documentation about configuring CCS + * privileges. To search a point in time (PIT) for an alias, you must have the + * read index privilege for the alias's data streams or indices. + *

+ * Search slicing + *

+ * When paging through a large number of documents, it can be helpful to split + * the search into multiple slices to consume them independently with the + * slice and pit properties. By default the splitting + * is done first on the shards, then locally on each shard. The local splitting + * partitions the shard into contiguous ranges based on Lucene document IDs. + *

+ * For instance if the number of shards is equal to 2 and you request 4 slices, + * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + * assigned to the second shard. + *

+ * IMPORTANT: The same point-in-time ID should be used for all slices. If + * different PIT IDs are used, slices can overlap and miss documents. This + * situation can occur because the splitting criterion is based on Lucene + * document IDs, which are not stable across changes to the index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search">Documentation * on elastic.co */ @@ -2409,12 +5184,35 @@ public SearchResponse search(SearchRequest request, Class * Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. + *

+ * If the Elasticsearch security features are enabled, you must have the read + * index privilege for the target data stream, index, or alias. For + * cross-cluster search, refer to the documentation about configuring CCS + * privileges. To search a point in time (PIT) for an alias, you must have the + * read index privilege for the alias's data streams or indices. + *

+ * Search slicing + *

+ * When paging through a large number of documents, it can be helpful to split + * the search into multiple slices to consume them independently with the + * slice and pit properties. By default the splitting + * is done first on the shards, then locally on each shard. The local splitting + * partitions the shard into contiguous ranges based on Lucene document IDs. + *

+ * For instance if the number of shards is equal to 2 and you request 4 slices, + * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + * assigned to the second shard. + *

+ * IMPORTANT: The same point-in-time ID should be used for all slices. If + * different PIT IDs are used, slices can overlap and miss documents. This + * situation can occur because the splitting criterion is based on Lucene + * document IDs, which are not stable across changes to the index. * * @param fn * a function that initializes a builder to create the * {@link SearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search">Documentation * on elastic.co */ @@ -2430,9 +5228,32 @@ public final SearchResponse search( * Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. + *

+ * If the Elasticsearch security features are enabled, you must have the read + * index privilege for the target data stream, index, or alias. For + * cross-cluster search, refer to the documentation about configuring CCS + * privileges. To search a point in time (PIT) for an alias, you must have the + * read index privilege for the alias's data streams or indices. + *

+ * Search slicing + *

+ * When paging through a large number of documents, it can be helpful to split + * the search into multiple slices to consume them independently with the + * slice and pit properties. By default the splitting + * is done first on the shards, then locally on each shard. The local splitting + * partitions the shard into contiguous ranges based on Lucene document IDs. + *

+ * For instance if the number of shards is equal to 2 and you request 4 slices, + * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + * assigned to the second shard. + *

+ * IMPORTANT: The same point-in-time ID should be used for all slices. If + * different PIT IDs are used, slices can overlap and miss documents. This + * situation can occur because the splitting criterion is based on Lucene + * document IDs, which are not stable across changes to the index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search">Documentation * on elastic.co */ @@ -2452,12 +5273,35 @@ public SearchResponse search(SearchRequest request, Type * Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. + *

+ * If the Elasticsearch security features are enabled, you must have the read + * index privilege for the target data stream, index, or alias. For + * cross-cluster search, refer to the documentation about configuring CCS + * privileges. To search a point in time (PIT) for an alias, you must have the + * read index privilege for the alias's data streams or indices. + *

+ * Search slicing + *

+ * When paging through a large number of documents, it can be helpful to split + * the search into multiple slices to consume them independently with the + * slice and pit properties. By default the splitting + * is done first on the shards, then locally on each shard. The local splitting + * partitions the shard into contiguous ranges based on Lucene document IDs. + *

+ * For instance if the number of shards is equal to 2 and you request 4 slices, + * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + * assigned to the second shard. + *

+ * IMPORTANT: The same point-in-time ID should be used for all slices. If + * different PIT IDs are used, slices can overlap and miss documents. This + * situation can occur because the splitting criterion is based on Lucene + * document IDs, which are not stable across changes to the index. * * @param fn * a function that initializes a builder to create the * {@link SearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search">Documentation * on elastic.co */ @@ -2472,10 +5316,364 @@ public final SearchResponse search( /** * Search a vector tile. *

- * Search a vector tile for geospatial values. + * Search a vector tile for geospatial values. Before using this API, you should + * be familiar with the Mapbox vector tile specification. The API returns + * results as a binary mapbox vector tile. + *

+ * Internally, Elasticsearch translates a vector tile search API request into a + * search containing: + *

    + *
  • A geo_bounding_box query on the <field>. + * The query uses the <zoom>/<x>/<y> tile as a + * bounding box.
  • + *
  • A geotile_grid or geohex_grid aggregation on + * the <field>. The grid_agg parameter + * determines the aggregation type. The aggregation uses the + * <zoom>/<x>/<y> tile as a bounding box.
  • + *
  • Optionally, a geo_bounds aggregation on the + * <field>. The search only includes this aggregation if the + * exact_bounds parameter is true.
  • + *
  • If the optional parameter with_labels is true, + * the internal search will include a dynamic runtime field that calls the + * getLabelPosition function of the geometry doc value. This + * enables the generation of new point features containing suggested geometry + * labels, so that, for example, multi-polygons will have only one label.
  • + *
+ *

+ * For example, Elasticsearch may translate a vector tile search API request + * with a grid_agg argument of geotile and an + * exact_bounds argument of true into the following + * search + * + *

+	 * GET my-index/_search
+	 * {
+	 *   "size": 10000,
+	 *   "query": {
+	 *     "geo_bounding_box": {
+	 *       "my-geo-field": {
+	 *         "top_left": {
+	 *           "lat": -40.979898069620134,
+	 *           "lon": -45
+	 *         },
+	 *         "bottom_right": {
+	 *           "lat": -66.51326044311186,
+	 *           "lon": 0
+	 *         }
+	 *       }
+	 *     }
+	 *   },
+	 *   "aggregations": {
+	 *     "grid": {
+	 *       "geotile_grid": {
+	 *         "field": "my-geo-field",
+	 *         "precision": 11,
+	 *         "size": 65536,
+	 *         "bounds": {
+	 *           "top_left": {
+	 *             "lat": -40.979898069620134,
+	 *             "lon": -45
+	 *           },
+	 *           "bottom_right": {
+	 *             "lat": -66.51326044311186,
+	 *             "lon": 0
+	 *           }
+	 *         }
+	 *       }
+	 *     },
+	 *     "bounds": {
+	 *       "geo_bounds": {
+	 *         "field": "my-geo-field",
+	 *         "wrap_longitude": false
+	 *       }
+	 *     }
+	 *   }
+	 * }
+	 * 
+	 * 
+ *

+ * The API returns results as a binary Mapbox vector tile. Mapbox vector tiles + * are encoded as Google Protobufs (PBF). By default, the tile contains three + * layers: + *

    + *
  • A hits layer containing a feature for each + * <field> value matching the geo_bounding_box + * query.
  • + *
  • An aggs layer containing a feature for each cell of the + * geotile_grid or geohex_grid. The layer only + * contains features for cells with matching data.
  • + *
  • A meta layer containing: + *
      + *
    • A feature containing a bounding box. By default, this is the bounding box + * of the tile.
    • + *
    • Value ranges for any sub-aggregations on the geotile_grid or + * geohex_grid.
    • + *
    • Metadata for the search.
    • + *
    + *
  • + *
+ *

+ * The API only returns features that can display at its zoom level. For + * example, if a polygon feature has no area at its zoom level, the API omits + * it. The API returns errors as UTF-8 encoded JSON. + *

+ * IMPORTANT: You can specify several options for this API as either a query + * parameter or request body parameter. If you specify both parameters, the + * query parameter takes precedence. + *

+ * Grid precision for geotile + *

+ * For a grid_agg of geotile, you can use cells in the + * aggs layer as tiles for lower zoom levels. + * grid_precision represents the additional zoom levels available + * through these cells. The final precision is computed by as follows: + * <zoom> + grid_precision. For example, if + * <zoom> is 7 and grid_precision is 8, then the + * geotile_grid aggregation will use a precision of 15. The maximum + * final precision is 29. The grid_precision also determines the + * number of cells for the grid as follows: + * (2^grid_precision) x (2^grid_precision). For example, a value of + * 8 divides the tile into a grid of 256 x 256 cells. The aggs + * layer only contains features for cells with matching data. + *

+ * Grid precision for geohex + *

+ * For a grid_agg of geohex, Elasticsearch uses + * <zoom> and grid_precision to calculate a + * final precision as follows: <zoom> + grid_precision. + *

+ * This precision determines the H3 resolution of the hexagonal cells produced + * by the geohex aggregation. The following table maps the H3 + * resolution for each precision. For example, if <zoom> is 3 + * and grid_precision is 3, the precision is 6. At a precision of + * 6, hexagonal cells have an H3 resolution of 2. If <zoom> + * is 3 and grid_precision is 4, the precision is 7. At a precision + * of 7, hexagonal cells have an H3 resolution of 3. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
PrecisionUnique tile binsH3 resolutionUnique hex binsRatio
14012230.5
21601227.625
364184213.15625
425618423.2890625
51024258825.744140625
64096258821.436035156
7163843411622.512329102
8655363411620.6280822754
926214442881221.099098206
10104857642881220.2747745514
114194304520168420.4808526039
12167772166141178820.8414913416
13671088646141178820.2103728354
142684354567988251620.3681524172
15107374182486917761220.644266719
16429496729686917761220.1610666797
1717179869184948424328420.2818666889
186871947673610338970298820.4932667053
19274877906944112372792091620.8632167343
201099511627776112372792091620.2158041836
2143980465111041216609544641220.3776573213
221759218604441613116266812488420.6609003122
237036874417766413116266812488420.165225078
2428147497671065614813867687418820.2891438866
251125899906842620155697073811931620.5060018015
264503599627370500155697073811931620.1265004504
2718014398509482000155697073811931620.03162511259
2872057594037927900155697073811931620.007906278149
29288230376151712000155697073811931620.001976569537
+ *

+ * Hexagonal cells don't align perfectly on a vector tile. Some cells may + * intersect more than one vector tile. To compute the H3 resolution for each + * precision, Elasticsearch compares the average density of hexagonal bins at + * each resolution with the average density of tile bins at each zoom level. + * Elasticsearch uses the H3 resolution that is closest to the corresponding + * geotile density. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt">Documentation * on elastic.co */ @@ -2489,13 +5687,367 @@ public BinaryResponse searchMvt(SearchMvtRequest request) throws IOException, El /** * Search a vector tile. *

- * Search a vector tile for geospatial values. + * Search a vector tile for geospatial values. Before using this API, you should + * be familiar with the Mapbox vector tile specification. The API returns + * results as a binary mapbox vector tile. + *

+ * Internally, Elasticsearch translates a vector tile search API request into a + * search containing: + *

    + *
  • A geo_bounding_box query on the <field>. + * The query uses the <zoom>/<x>/<y> tile as a + * bounding box.
  • + *
  • A geotile_grid or geohex_grid aggregation on + * the <field>. The grid_agg parameter + * determines the aggregation type. The aggregation uses the + * <zoom>/<x>/<y> tile as a bounding box.
  • + *
  • Optionally, a geo_bounds aggregation on the + * <field>. The search only includes this aggregation if the + * exact_bounds parameter is true.
  • + *
  • If the optional parameter with_labels is true, + * the internal search will include a dynamic runtime field that calls the + * getLabelPosition function of the geometry doc value. This + * enables the generation of new point features containing suggested geometry + * labels, so that, for example, multi-polygons will have only one label.
  • + *
+ *

+ * For example, Elasticsearch may translate a vector tile search API request + * with a grid_agg argument of geotile and an + * exact_bounds argument of true into the following + * search + * + *

+	 * GET my-index/_search
+	 * {
+	 *   "size": 10000,
+	 *   "query": {
+	 *     "geo_bounding_box": {
+	 *       "my-geo-field": {
+	 *         "top_left": {
+	 *           "lat": -40.979898069620134,
+	 *           "lon": -45
+	 *         },
+	 *         "bottom_right": {
+	 *           "lat": -66.51326044311186,
+	 *           "lon": 0
+	 *         }
+	 *       }
+	 *     }
+	 *   },
+	 *   "aggregations": {
+	 *     "grid": {
+	 *       "geotile_grid": {
+	 *         "field": "my-geo-field",
+	 *         "precision": 11,
+	 *         "size": 65536,
+	 *         "bounds": {
+	 *           "top_left": {
+	 *             "lat": -40.979898069620134,
+	 *             "lon": -45
+	 *           },
+	 *           "bottom_right": {
+	 *             "lat": -66.51326044311186,
+	 *             "lon": 0
+	 *           }
+	 *         }
+	 *       }
+	 *     },
+	 *     "bounds": {
+	 *       "geo_bounds": {
+	 *         "field": "my-geo-field",
+	 *         "wrap_longitude": false
+	 *       }
+	 *     }
+	 *   }
+	 * }
+	 * 
+	 * 
+ *

+ * The API returns results as a binary Mapbox vector tile. Mapbox vector tiles + * are encoded as Google Protobufs (PBF). By default, the tile contains three + * layers: + *

    + *
  • A hits layer containing a feature for each + * <field> value matching the geo_bounding_box + * query.
  • + *
  • An aggs layer containing a feature for each cell of the + * geotile_grid or geohex_grid. The layer only + * contains features for cells with matching data.
  • + *
  • A meta layer containing: + *
      + *
    • A feature containing a bounding box. By default, this is the bounding box + * of the tile.
    • + *
    • Value ranges for any sub-aggregations on the geotile_grid or + * geohex_grid.
    • + *
    • Metadata for the search.
    • + *
    + *
  • + *
+ *

+ * The API only returns features that can display at its zoom level. For + * example, if a polygon feature has no area at its zoom level, the API omits + * it. The API returns errors as UTF-8 encoded JSON. + *

+ * IMPORTANT: You can specify several options for this API as either a query + * parameter or request body parameter. If you specify both parameters, the + * query parameter takes precedence. + *

+ * Grid precision for geotile + *

+ * For a grid_agg of geotile, you can use cells in the + * aggs layer as tiles for lower zoom levels. + * grid_precision represents the additional zoom levels available + * through these cells. The final precision is computed by as follows: + * <zoom> + grid_precision. For example, if + * <zoom> is 7 and grid_precision is 8, then the + * geotile_grid aggregation will use a precision of 15. The maximum + * final precision is 29. The grid_precision also determines the + * number of cells for the grid as follows: + * (2^grid_precision) x (2^grid_precision). For example, a value of + * 8 divides the tile into a grid of 256 x 256 cells. The aggs + * layer only contains features for cells with matching data. + *

+ * Grid precision for geohex + *

+ * For a grid_agg of geohex, Elasticsearch uses + * <zoom> and grid_precision to calculate a + * final precision as follows: <zoom> + grid_precision. + *

+ * This precision determines the H3 resolution of the hexagonal cells produced + * by the geohex aggregation. The following table maps the H3 + * resolution for each precision. For example, if <zoom> is 3 + * and grid_precision is 3, the precision is 6. At a precision of + * 6, hexagonal cells have an H3 resolution of 2. If <zoom> + * is 3 and grid_precision is 4, the precision is 7. At a precision + * of 7, hexagonal cells have an H3 resolution of 3. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
PrecisionUnique tile binsH3 resolutionUnique hex binsRatio
14012230.5
21601227.625
364184213.15625
425618423.2890625
51024258825.744140625
64096258821.436035156
7163843411622.512329102
8655363411620.6280822754
926214442881221.099098206
10104857642881220.2747745514
114194304520168420.4808526039
12167772166141178820.8414913416
13671088646141178820.2103728354
142684354567988251620.3681524172
15107374182486917761220.644266719
16429496729686917761220.1610666797
1717179869184948424328420.2818666889
186871947673610338970298820.4932667053
19274877906944112372792091620.8632167343
201099511627776112372792091620.2158041836
2143980465111041216609544641220.3776573213
221759218604441613116266812488420.6609003122
237036874417766413116266812488420.165225078
2428147497671065614813867687418820.2891438866
251125899906842620155697073811931620.5060018015
264503599627370500155697073811931620.1265004504
2718014398509482000155697073811931620.03162511259
2872057594037927900155697073811931620.007906278149
29288230376151712000155697073811931620.001976569537
+ *

+ * Hexagonal cells don't align perfectly on a vector tile. Some cells may + * intersect more than one vector tile. To compute the H3 resolution for each + * precision, Elasticsearch compares the average density of hexagonal bins at + * each resolution with the average density of tile bins at each zoom level. + * Elasticsearch uses the H3 resolution that is closest to the corresponding + * geotile density. * * @param fn * a function that initializes a builder to create the * {@link SearchMvtRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt">Documentation * on elastic.co */ @@ -2512,10 +6064,14 @@ public final BinaryResponse searchMvt(Functionindices section. + *

+ * If the Elasticsearch security features are enabled, you must have the + * view_index_metadata or manage index privilege for + * the target data stream, index, or alias. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards">Documentation * on elastic.co */ @@ -2532,13 +6088,17 @@ public SearchShardsResponse searchShards(SearchShardsRequest request) throws IOE * Get the indices and shards that a search request would be run against. This * information can be useful for working out issues or planning optimizations * with routing and shard preferences. When filtered aliases are used, the - * filter is returned as part of the indices section. + * filter is returned as part of the indices section. + *

+ * If the Elasticsearch security features are enabled, you must have the + * view_index_metadata or manage index privilege for + * the target data stream, index, or alias. * * @param fn * a function that initializes a builder to create the * {@link SearchShardsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards">Documentation * on elastic.co */ @@ -2554,10 +6114,14 @@ public final SearchShardsResponse searchShards( * Get the indices and shards that a search request would be run against. This * information can be useful for working out issues or planning optimizations * with routing and shard preferences. When filtered aliases are used, the - * filter is returned as part of the indices section. + * filter is returned as part of the indices section. + *

+ * If the Elasticsearch security features are enabled, you must have the + * view_index_metadata or manage index privilege for + * the target data stream, index, or alias. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards">Documentation * on elastic.co */ @@ -2572,7 +6136,7 @@ public SearchShardsResponse searchShards() throws IOException, ElasticsearchExce * Run a search with a search template. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template">Documentation * on elastic.co */ @@ -2594,7 +6158,7 @@ public SearchTemplateResponse searchTemplate(SearchTempla * a function that initializes a builder to create the * {@link SearchTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template">Documentation * on elastic.co */ @@ -2608,7 +6172,7 @@ public final SearchTemplateResponse searchTemplate( * Run a search with a search template. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template">Documentation * on elastic.co */ @@ -2630,7 +6194,7 @@ public SearchTemplateResponse searchTemplate(SearchTempla * a function that initializes a builder to create the * {@link SearchTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template">Documentation * on elastic.co */ @@ -2645,21 +6209,18 @@ public final SearchTemplateResponse searchTemplate( /** * Get terms in an index. *

- * Discover terms that match a partial string in an index. This "terms - * enum" API is designed for low-latency look-ups used in auto-complete - * scenarios. + * Discover terms that match a partial string in an index. This API is designed + * for low-latency look-ups used in auto-complete scenarios.

*

- * If the complete property in the response is false, the returned - * terms set may be incomplete and should be treated as approximate. This can - * occur due to a few reasons, such as a request timeout or a node error. - *

- * NOTE: The terms enum API may return terms from deleted documents. Deleted + * info The terms enum API may return terms from deleted documents. Deleted * documents are initially only marked as deleted. It is not until their * segments are merged that documents are actually deleted. Until that happens, * the terms enum API will return terms from these documents. - * + *

+ *
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum">Documentation * on elastic.co */ @@ -2673,24 +6234,21 @@ public TermsEnumResponse termsEnum(TermsEnumRequest request) throws IOException, /** * Get terms in an index. *

- * Discover terms that match a partial string in an index. This "terms - * enum" API is designed for low-latency look-ups used in auto-complete - * scenarios. + * Discover terms that match a partial string in an index. This API is designed + * for low-latency look-ups used in auto-complete scenarios.

*

- * If the complete property in the response is false, the returned - * terms set may be incomplete and should be treated as approximate. This can - * occur due to a few reasons, such as a request timeout or a node error. - *

- * NOTE: The terms enum API may return terms from deleted documents. Deleted + * info The terms enum API may return terms from deleted documents. Deleted * documents are initially only marked as deleted. It is not until their * segments are merged that documents are actually deleted. Until that happens, * the terms enum API will return terms from these documents. - * + *

+ *
+ * * @param fn * a function that initializes a builder to create the * {@link TermsEnumRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum">Documentation * on elastic.co */ @@ -2706,9 +6264,59 @@ public final TermsEnumResponse termsEnum(Function * Get information and statistics about terms in the fields of a particular * document. + *

+ * You can retrieve term vectors for documents stored in the index or for + * artificial documents passed in the body of the request. You can specify the + * fields you are interested in through the fields parameter or by + * adding the fields to the request body. For example: + * + *

+	 * GET /my-index-000001/_termvectors/1?fields=message
+	 * 
+	 * 
+ *

+ * Fields can be specified using wildcards, similar to the multi match query. + *

+ * Term vectors are real-time by default, not near real-time. This can be + * changed by setting realtime parameter to false. + *

+ * You can request three types of values: term information, term + * statistics, and field statistics. By default, all term + * information and field statistics are returned for all fields but term + * statistics are excluded. + *

+ * Term information + *

    + *
  • term frequency in the field (always returned)
  • + *
  • term positions (positions: true)
  • + *
  • start and end offsets (offsets: true)
  • + *
  • term payloads (payloads: true), as base64 encoded bytes
  • + *
+ *

+ * If the requested information wasn't stored in the index, it will be computed + * on the fly if possible. Additionally, term vectors could be computed for + * documents not even existing in the index, but instead provided by the user. + *

+ *

+ * warn Start and end offsets assume UTF-16 encoding is being used. If you want + * to use these offsets in order to get the original text that produced this + * token, you should make sure that the string you are taking a sub-string of is + * also encoded using UTF-16. + *

+ *
+ *

+ * Behaviour + *

+ * The term and field statistics are not accurate. Deleted documents are not + * taken into account. The information is only retrieved for the shard the + * requested document resides in. The term and field statistics are therefore + * only useful as relative measures whereas the absolute numbers have no meaning + * in this context. By default, when requesting term vectors of artificial + * documents, a shard to get the statistics from is randomly selected. Use + * routing only to hit a particular shard. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors">Documentation * on elastic.co */ @@ -2725,12 +6333,62 @@ public TermvectorsResponse termvectors(TermvectorsRequest *

* Get information and statistics about terms in the fields of a particular * document. + *

+ * You can retrieve term vectors for documents stored in the index or for + * artificial documents passed in the body of the request. You can specify the + * fields you are interested in through the fields parameter or by + * adding the fields to the request body. For example: + * + *

+	 * GET /my-index-000001/_termvectors/1?fields=message
+	 * 
+	 * 
+ *

+ * Fields can be specified using wildcards, similar to the multi match query. + *

+ * Term vectors are real-time by default, not near real-time. This can be + * changed by setting realtime parameter to false. + *

+ * You can request three types of values: term information, term + * statistics, and field statistics. By default, all term + * information and field statistics are returned for all fields but term + * statistics are excluded. + *

+ * Term information + *

    + *
  • term frequency in the field (always returned)
  • + *
  • term positions (positions: true)
  • + *
  • start and end offsets (offsets: true)
  • + *
  • term payloads (payloads: true), as base64 encoded bytes
  • + *
+ *

+ * If the requested information wasn't stored in the index, it will be computed + * on the fly if possible. Additionally, term vectors could be computed for + * documents not even existing in the index, but instead provided by the user. + *

+ *

+ * warn Start and end offsets assume UTF-16 encoding is being used. If you want + * to use these offsets in order to get the original text that produced this + * token, you should make sure that the string you are taking a sub-string of is + * also encoded using UTF-16. + *

+ *
+ *

+ * Behaviour + *

+ * The term and field statistics are not accurate. Deleted documents are not + * taken into account. The information is only retrieved for the shard the + * requested document resides in. The term and field statistics are therefore + * only useful as relative measures whereas the absolute numbers have no meaning + * in this context. By default, when requesting term vectors of artificial + * documents, a shard to get the statistics from is randomly selected. Use + * routing only to hit a particular shard. * * @param fn * a function that initializes a builder to create the * {@link TermvectorsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors">Documentation * on elastic.co */ @@ -2743,11 +6401,36 @@ public final TermvectorsResponse termvectors( // ----- Endpoint: update /** - * Update a document. Updates a document by running a script or passing a - * partial document. + * Update a document. + *

+ * Update a document by running a script or passing a partial document. + *

+ * If the Elasticsearch security features are enabled, you must have the + * index or write index privilege for the target index + * or index alias. + *

+ * The script can update, delete, or skip modifying the document. The API also + * supports passing a partial document, which is merged into the existing + * document. To fully replace an existing document, use the index API. This + * operation: + *

    + *
  • Gets the document (collocated with the shard) from the index.
  • + *
  • Runs the specified script.
  • + *
  • Indexes the result.
  • + *
+ *

+ * The document must still be reindexed, but using this API removes some network + * roundtrips and reduces chances of version conflicts between the GET and the + * index operation. + *

+ * The _source field must be enabled to use this API. In addition + * to _source, you can access the following variables through the + * ctx map: _index, _type, + * _id, _version, _routing, and + * _now (the current timestamp). * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update">Documentation * on elastic.co */ @@ -2763,14 +6446,39 @@ public UpdateResponse update( } /** - * Update a document. Updates a document by running a script or passing a - * partial document. + * Update a document. + *

+ * Update a document by running a script or passing a partial document. + *

+ * If the Elasticsearch security features are enabled, you must have the + * index or write index privilege for the target index + * or index alias. + *

+ * The script can update, delete, or skip modifying the document. The API also + * supports passing a partial document, which is merged into the existing + * document. To fully replace an existing document, use the index API. This + * operation: + *

    + *
  • Gets the document (collocated with the shard) from the index.
  • + *
  • Runs the specified script.
  • + *
  • Indexes the result.
  • + *
+ *

+ * The document must still be reindexed, but using this API removes some network + * roundtrips and reduces chances of version conflicts between the GET and the + * index operation. + *

+ * The _source field must be enabled to use this API. In addition + * to _source, you can access the following variables through the + * ctx map: _index, _type, + * _id, _version, _routing, and + * _now (the current timestamp). * * @param fn * a function that initializes a builder to create the * {@link UpdateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update">Documentation * on elastic.co */ @@ -2781,11 +6489,36 @@ public final UpdateResponse update( } /** - * Update a document. Updates a document by running a script or passing a - * partial document. + * Update a document. + *

+ * Update a document by running a script or passing a partial document. + *

+ * If the Elasticsearch security features are enabled, you must have the + * index or write index privilege for the target index + * or index alias. + *

+ * The script can update, delete, or skip modifying the document. The API also + * supports passing a partial document, which is merged into the existing + * document. To fully replace an existing document, use the index API. This + * operation: + *

    + *
  • Gets the document (collocated with the shard) from the index.
  • + *
  • Runs the specified script.
  • + *
  • Indexes the result.
  • + *
+ *

+ * The document must still be reindexed, but using this API removes some network + * roundtrips and reduces chances of version conflicts between the GET and the + * index operation. + *

+ * The _source field must be enabled to use this API. In addition + * to _source, you can access the following variables through the + * ctx map: _index, _type, + * _id, _version, _routing, and + * _now (the current timestamp). * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update">Documentation * on elastic.co */ @@ -2801,14 +6534,39 @@ public UpdateResponse update( } /** - * Update a document. Updates a document by running a script or passing a - * partial document. + * Update a document. + *

+ * Update a document by running a script or passing a partial document. + *

+ * If the Elasticsearch security features are enabled, you must have the + * index or write index privilege for the target index + * or index alias. + *

+ * The script can update, delete, or skip modifying the document. The API also + * supports passing a partial document, which is merged into the existing + * document. To fully replace an existing document, use the index API. This + * operation: + *

    + *
  • Gets the document (collocated with the shard) from the index.
  • + *
  • Runs the specified script.
  • + *
  • Indexes the result.
  • + *
+ *

+ * The document must still be reindexed, but using this API removes some network + * roundtrips and reduces chances of version conflicts between the GET and the + * index operation. + *

+ * The _source field must be enabled to use this API. In addition + * to _source, you can access the following variables through the + * ctx map: _index, _type, + * _id, _version, _routing, and + * _now (the current timestamp). * * @param fn * a function that initializes a builder to create the * {@link UpdateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update">Documentation * on elastic.co */ @@ -2825,9 +6583,142 @@ public final UpdateResponse update( * query is specified, performs an update on every document in the data stream * or index without modifying the source, which is useful for picking up mapping * changes. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or alias: + *

    + *
  • read
  • + *
  • index or write
  • + *
+ *

+ * You can specify the query criteria in the request URI or the request body + * using the same syntax as the search API. + *

+ * When you submit an update by query request, Elasticsearch gets a snapshot of + * the data stream or index when it begins processing the request and updates + * matching documents using internal versioning. When the versions match, the + * document is updated and the version number is incremented. If a document + * changes between the time that the snapshot is taken and the update operation + * is processed, it results in a version conflict and the operation fails. You + * can opt to count version conflicts instead of halting and returning by + * setting conflicts to proceed. Note that if you opt + * to count version conflicts, the operation could attempt to update more + * documents from the source than max_docs until it has + * successfully updated max_docs documents or it has gone through + * every document in the source query. + *

+ * NOTE: Documents with a version equal to 0 cannot be updated using update by + * query because internal versioning does not support 0 as a valid version + * number. + *

+ * While processing an update by query request, Elasticsearch performs multiple + * search requests sequentially to find all of the matching documents. A bulk + * update request is performed for each batch of matching documents. Any query + * or update failures cause the update by query request to fail and the failures + * are shown in the response. Any update requests that completed successfully + * still stick, they are not rolled back. + *

+ * Throttling update requests + *

+ * To control the rate at which update by query issues batches of update + * operations, you can set requests_per_second to any positive + * decimal number. This pads each batch with a wait time to throttle the rate. + * Set requests_per_second to -1 to turn off + * throttling. + *

+ * Throttling uses a wait time between batches so that the internal scroll + * requests can be given a timeout that takes the request padding into account. + * The padding time is the difference between the batch size divided by the + * requests_per_second and the time spent writing. By default the + * batch size is 1000, so if requests_per_second is set to + * 500: + * + *

+	 * target_time = 1000 / 500 per second = 2 seconds
+	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+	 * 
+	 * 
+ *

+ * Since the batch is issued as a single _bulk request, large batch sizes cause + * Elasticsearch to create many requests and wait before starting the next set. + * This is "bursty" instead of "smooth". + *

+ * Slicing + *

+ * Update by query supports sliced scroll to parallelize the update process. + * This can improve efficiency and provide a convenient way to break the request + * down into smaller parts. + *

+ * Setting slices to auto chooses a reasonable number + * for most data streams and indices. This setting will use one slice per shard, + * up to a certain limit. If there are multiple source data streams or indices, + * it will choose the number of slices based on the index or backing index with + * the smallest number of shards. + *

+ * Adding slices to _update_by_query just automates + * the manual process of creating sub-requests, which means it has some quirks: + *

    + *
  • You can see these requests in the tasks APIs. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices + * only contains the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each sub-request.
  • + *
  • Due to the nature of slices each sub-request won't get a perfectly even + * portion of the documents. All documents will be addressed, but some slices + * may be larger than others. Expect larger slices to have a more even + * distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed proportionally + * to each sub-request. Combine that with the point above about distribution + * being uneven and you should conclude that using max_docs with + * slices might not result in exactly max_docs + * documents being updated.
  • + *
  • Each sub-request gets a slightly different snapshot of the source data + * stream or index though these are all taken at approximately the same + * time.
  • + *
+ *

+ * If you're slicing manually or otherwise tuning automatic slicing, keep in + * mind that: + *

    + *
  • Query performance is most efficient when the number of slices is equal to + * the number of shards in the index or backing index. If that number is large + * (for example, 500), choose a lower number as too many slices hurts + * performance. Setting slices higher than the number of shards generally does + * not improve efficiency and adds overhead.
  • + *
  • Update performance scales linearly across available resources with the + * number of slices.
  • + *
+ *

+ * Whether query or update performance dominates the runtime depends on the + * documents being reindexed and cluster resources. + *

+ * Update the document source + *

+ * Update by query supports scripts to update the document source. As with the + * update API, you can set ctx.op to change the operation that is + * performed. + *

+ * Set ctx.op = "noop" if your script decides that it + * doesn't have to make any changes. The update by query operation skips + * updating the document and increments the noop counter. + *

+ * Set ctx.op = "delete" if your script decides that the + * document should be deleted. The update by query operation deletes the + * document and increments the deleted counter. + *

+ * Update by query supports only index, noop, and + * delete. Setting ctx.op to anything else is an + * error. Setting any other field in ctx is an error. This API + * enables you to only modify the source of matching documents; you cannot move + * them. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query">Documentation * on elastic.co */ @@ -2844,12 +6735,145 @@ public UpdateByQueryResponse updateByQuery(UpdateByQueryRequest request) * query is specified, performs an update on every document in the data stream * or index without modifying the source, which is useful for picking up mapping * changes. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or alias: + *

    + *
  • read
  • + *
  • index or write
  • + *
+ *

+ * You can specify the query criteria in the request URI or the request body + * using the same syntax as the search API. + *

+ * When you submit an update by query request, Elasticsearch gets a snapshot of + * the data stream or index when it begins processing the request and updates + * matching documents using internal versioning. When the versions match, the + * document is updated and the version number is incremented. If a document + * changes between the time that the snapshot is taken and the update operation + * is processed, it results in a version conflict and the operation fails. You + * can opt to count version conflicts instead of halting and returning by + * setting conflicts to proceed. Note that if you opt + * to count version conflicts, the operation could attempt to update more + * documents from the source than max_docs until it has + * successfully updated max_docs documents or it has gone through + * every document in the source query. + *

+ * NOTE: Documents with a version equal to 0 cannot be updated using update by + * query because internal versioning does not support 0 as a valid version + * number. + *

+ * While processing an update by query request, Elasticsearch performs multiple + * search requests sequentially to find all of the matching documents. A bulk + * update request is performed for each batch of matching documents. Any query + * or update failures cause the update by query request to fail and the failures + * are shown in the response. Any update requests that completed successfully + * still stick, they are not rolled back. + *

+ * Throttling update requests + *

+ * To control the rate at which update by query issues batches of update + * operations, you can set requests_per_second to any positive + * decimal number. This pads each batch with a wait time to throttle the rate. + * Set requests_per_second to -1 to turn off + * throttling. + *

+ * Throttling uses a wait time between batches so that the internal scroll + * requests can be given a timeout that takes the request padding into account. + * The padding time is the difference between the batch size divided by the + * requests_per_second and the time spent writing. By default the + * batch size is 1000, so if requests_per_second is set to + * 500: + * + *

+	 * target_time = 1000 / 500 per second = 2 seconds
+	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+	 * 
+	 * 
+ *

+ * Since the batch is issued as a single _bulk request, large batch sizes cause + * Elasticsearch to create many requests and wait before starting the next set. + * This is "bursty" instead of "smooth". + *

+ * Slicing + *

+ * Update by query supports sliced scroll to parallelize the update process. + * This can improve efficiency and provide a convenient way to break the request + * down into smaller parts. + *

+ * Setting slices to auto chooses a reasonable number + * for most data streams and indices. This setting will use one slice per shard, + * up to a certain limit. If there are multiple source data streams or indices, + * it will choose the number of slices based on the index or backing index with + * the smallest number of shards. + *

+ * Adding slices to _update_by_query just automates + * the manual process of creating sub-requests, which means it has some quirks: + *

    + *
  • You can see these requests in the tasks APIs. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices + * only contains the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each sub-request.
  • + *
  • Due to the nature of slices each sub-request won't get a perfectly even + * portion of the documents. All documents will be addressed, but some slices + * may be larger than others. Expect larger slices to have a more even + * distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed proportionally + * to each sub-request. Combine that with the point above about distribution + * being uneven and you should conclude that using max_docs with + * slices might not result in exactly max_docs + * documents being updated.
  • + *
  • Each sub-request gets a slightly different snapshot of the source data + * stream or index though these are all taken at approximately the same + * time.
  • + *
+ *

+ * If you're slicing manually or otherwise tuning automatic slicing, keep in + * mind that: + *

    + *
  • Query performance is most efficient when the number of slices is equal to + * the number of shards in the index or backing index. If that number is large + * (for example, 500), choose a lower number as too many slices hurts + * performance. Setting slices higher than the number of shards generally does + * not improve efficiency and adds overhead.
  • + *
  • Update performance scales linearly across available resources with the + * number of slices.
  • + *
+ *

+ * Whether query or update performance dominates the runtime depends on the + * documents being reindexed and cluster resources. + *

+ * Update the document source + *

+ * Update by query supports scripts to update the document source. As with the + * update API, you can set ctx.op to change the operation that is + * performed. + *

+ * Set ctx.op = "noop" if your script decides that it + * doesn't have to make any changes. The update by query operation skips + * updating the document and increments the noop counter. + *

+ * Set ctx.op = "delete" if your script decides that the + * document should be deleted. The update by query operation deletes the + * document and increments the deleted counter. + *

+ * Update by query supports only index, noop, and + * delete. Setting ctx.op to anything else is an + * error. Setting any other field in ctx is an error. This API + * enables you to only modify the source of matching documents; you cannot move + * them. * * @param fn * a function that initializes a builder to create the * {@link UpdateByQueryRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query">Documentation * on elastic.co */ @@ -2870,7 +6894,7 @@ public final UpdateByQueryResponse updateByQuery( * current batch to prevent scroll timeouts. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle">Documentation * on elastic.co */ @@ -2894,7 +6918,7 @@ public UpdateByQueryRethrottleResponse updateByQueryRethrottle(UpdateByQueryReth * a function that initializes a builder to create the * {@link UpdateByQueryRethrottleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_helpers/esql/EsqlHelper.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_helpers/esql/EsqlHelper.java index 26597ec87..7e3a3666f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_helpers/esql/EsqlHelper.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_helpers/esql/EsqlHelper.java @@ -22,8 +22,8 @@ import co.elastic.clients.elasticsearch._types.FieldValue; import co.elastic.clients.elasticsearch.esql.ElasticsearchEsqlAsyncClient; import co.elastic.clients.elasticsearch.esql.ElasticsearchEsqlClient; +import co.elastic.clients.elasticsearch.esql.EsqlFormat; import co.elastic.clients.elasticsearch.esql.QueryRequest; -import co.elastic.clients.elasticsearch.esql.query.EsqlFormat; import co.elastic.clients.json.JsonData; import 
co.elastic.clients.transport.endpoints.BinaryResponse; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ErrorCause.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ErrorCause.java index 4a9156c7d..7bd4d8549 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ErrorCause.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ErrorCause.java @@ -121,7 +121,7 @@ public final String type() { } /** - * A human-readable explanation of the error, in english + * A human-readable explanation of the error, in English. *

* API name: {@code reason} */ @@ -287,7 +287,7 @@ public final Builder type(@Nullable String value) { } /** - * A human-readable explanation of the error, in english + * A human-readable explanation of the error, in English. *

* API name: {@code reason} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ExpandWildcard.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ExpandWildcard.java index 29e214ce6..5bd02e65c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ExpandWildcard.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ExpandWildcard.java @@ -62,8 +62,8 @@ public enum ExpandWildcard implements JsonEnum { Closed("closed"), /** - * Match hidden data streams and hidden indices. Must be combined with open, - * closed, or both. + * Match hidden data streams and hidden indices. Must be combined with + * open, closed, or both. */ Hidden("hidden"), diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/KnnQuery.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/KnnQuery.java index 66ebb9d8c..d619f5988 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/KnnQuery.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/KnnQuery.java @@ -80,6 +80,9 @@ public class KnnQuery extends QueryBase implements QueryVariant { @Nullable private final Float similarity; + @Nullable + private final RescoreVector rescoreVector; + // --------------------------------------------------------------------------------------------- private KnnQuery(Builder builder) { @@ -92,6 +95,7 @@ private KnnQuery(Builder builder) { this.k = builder.k; this.filter = ApiTypeHelper.unmodifiable(builder.filter); this.similarity = builder.similarity; + this.rescoreVector = builder.rescoreVector; } @@ -175,6 +179,16 @@ public final Float similarity() { return this.similarity; } + /** + * Apply oversampling and rescoring to quantized vectors * + *

+ * API name: {@code rescore_vector} + */ + @Nullable + public final RescoreVector rescoreVector() { + return this.rescoreVector; + } + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { super.serializeInternal(generator, mapper); @@ -221,6 +235,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write(this.similarity); } + if (this.rescoreVector != null) { + generator.writeKey("rescore_vector"); + this.rescoreVector.serialize(generator, mapper); + + } } @@ -251,6 +270,9 @@ public static class Builder extends QueryBase.AbstractBuilder implement @Nullable private Float similarity; + @Nullable + private RescoreVector rescoreVector; + /** * Required - The name of the vector field to search against *

@@ -372,6 +394,25 @@ public final Builder similarity(@Nullable Float value) { return this; } + /** + * Apply oversampling and rescoring to quantized vectors + *

+ * API name: {@code rescore_vector} + */ + public final Builder rescoreVector(@Nullable RescoreVector value) { + this.rescoreVector = value; + return this; + } + + /** + * Apply oversampling and rescoring to quantized vectors + *

+ * API name: {@code rescore_vector} + */ + public final Builder rescoreVector(Function> fn) { + return this.rescoreVector(fn.apply(new RescoreVector.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -408,6 +449,7 @@ protected static void setupKnnQueryDeserializer(ObjectDeserializer + * API name: {@code rescore_vector} + */ + @Nullable + public final RescoreVector rescoreVector() { + return this.rescoreVector; + } + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { super.serializeInternal(generator, mapper); @@ -189,6 +203,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write(this.similarity); } + if (this.rescoreVector != null) { + generator.writeKey("rescore_vector"); + this.rescoreVector.serialize(generator, mapper); + + } } @@ -214,6 +233,9 @@ public static class Builder extends RetrieverBase.AbstractBuilder imple @Nullable private Float similarity; + @Nullable + private RescoreVector rescoreVector; + /** * Required - The name of the vector field to search against. *

@@ -302,6 +324,25 @@ public final Builder similarity(@Nullable Float value) { return this; } + /** + * Apply oversampling and rescoring to quantized vectors + *

+ * API name: {@code rescore_vector} + */ + public final Builder rescoreVector(@Nullable RescoreVector value) { + this.rescoreVector = value; + return this; + } + + /** + * Apply oversampling and rescoring to quantized vectors + *

+ * API name: {@code rescore_vector} + */ + public final Builder rescoreVector(Function> fn) { + return this.rescoreVector(fn.apply(new RescoreVector.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -337,6 +378,7 @@ protected static void setupKnnRetrieverDeserializer(ObjectDeserializer + * API name: {@code rescore_vector} + */ + @Nullable + public final RescoreVector rescoreVector() { + return this.rescoreVector; + } + /** * Serialize this object to JSON. */ @@ -260,6 +274,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { this.innerHits.serialize(generator, mapper); } + if (this.rescoreVector != null) { + generator.writeKey("rescore_vector"); + this.rescoreVector.serialize(generator, mapper); + + } } @@ -301,6 +320,9 @@ public static class Builder extends WithJsonObjectBuilderBase implement @Nullable private InnerHits innerHits; + @Nullable + private RescoreVector rescoreVector; + /** * Required - The name of the vector field to search against *

@@ -451,6 +473,25 @@ public final Builder innerHits(Function + * API name: {@code rescore_vector} + */ + public final Builder rescoreVector(@Nullable RescoreVector value) { + this.rescoreVector = value; + return this; + } + + /** + * Apply oversampling and rescoring to quantized vectors + *

+ * API name: {@code rescore_vector} + */ + public final Builder rescoreVector(Function> fn) { + return this.rescoreVector(fn.apply(new RescoreVector.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -489,6 +530,7 @@ protected static void setupKnnSearchDeserializer(ObjectDeserializerAPI + * specification + */ +@JsonpDeserializable +public class RescoreVector implements JsonpSerializable { + private final float oversample; + + // --------------------------------------------------------------------------------------------- + + private RescoreVector(Builder builder) { + + this.oversample = ApiTypeHelper.requireNonNull(builder.oversample, this, "oversample"); + + } + + public static RescoreVector of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - Applies the specified oversample factor to k on the approximate + * kNN search + *

+ * API name: {@code oversample} + */ + public final float oversample() { + return this.oversample; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("oversample"); + generator.write(this.oversample); + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link RescoreVector}. + */ + + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + private Float oversample; + + /** + * Required - Applies the specified oversample factor to k on the approximate + * kNN search + *

+ * API name: {@code oversample} + */ + public final Builder oversample(float value) { + this.oversample = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link RescoreVector}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public RescoreVector build() { + _checkSingleUse(); + + return new RescoreVector(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link RescoreVector} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + RescoreVector::setupRescoreVectorDeserializer); + + protected static void setupRescoreVectorDeserializer(ObjectDeserializer op) { + + op.add(Builder::oversample, JsonpDeserializer.floatDeserializer(), "oversample"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/Retries.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/Retries.java index d81d2dc77..b916a744a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/Retries.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/Retries.java @@ -76,14 +76,18 @@ public static Retries of(Function> fn) { } /** - * Required - API name: {@code bulk} + * Required - The number of bulk actions retried. + *

+ * API name: {@code bulk} */ public final long bulk() { return this.bulk; } /** - * Required - API name: {@code search} + * Required - The number of search actions retried. + *

+ * API name: {@code search} */ public final long search() { return this.search; @@ -125,7 +129,9 @@ public static class Builder extends WithJsonObjectBuilderBase implement private Long search; /** - * Required - API name: {@code bulk} + * Required - The number of bulk actions retried. + *

+ * API name: {@code bulk} */ public final Builder bulk(long value) { this.bulk = value; @@ -133,7 +139,9 @@ public final Builder bulk(long value) { } /** - * Required - API name: {@code search} + * Required - The number of search actions retried. + *

+ * API name: {@code search} */ public final Builder search(long value) { this.search = value; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ShardStatistics.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ShardStatistics.java index 41eb3c418..59ab1ff62 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ShardStatistics.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ShardStatistics.java @@ -88,14 +88,17 @@ public static ShardStatistics of(Function + * API name: {@code failed} */ public final Number failed() { return this.failed; } /** - * Required - Indicates how many shards have successfully run the search. + * Required - The number of shards the operation or search succeeded on. *

* API name: {@code successful} */ @@ -104,7 +107,7 @@ public final Number successful() { } /** - * Required - Indicates how many shards the search will run on overall. + * Required - The number of shards the operation or search will run on overall. *

* API name: {@code total} */ @@ -190,7 +193,10 @@ public static class Builder extends WithJsonObjectBuilderBase implement private Number skipped; /** - * Required - API name: {@code failed} + * Required - The number of shards the operation or search attempted to run on + * but failed. + *

+ * API name: {@code failed} */ public final Builder failed(Number value) { this.failed = value; @@ -198,7 +204,7 @@ public final Builder failed(Number value) { } /** - * Required - Indicates how many shards have successfully run the search. + * Required - The number of shards the operation or search succeeded on. *

* API name: {@code successful} */ @@ -208,7 +214,7 @@ public final Builder successful(Number value) { } /** - * Required - Indicates how many shards the search will run on overall. + * Required - The number of shards the operation or search will run on overall. *

* API name: {@code total} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/SortOptions.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/SortOptions.java index 83d147576..2fa114e2e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/SortOptions.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/SortOptions.java @@ -59,7 +59,7 @@ /** * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/sort-search-results.html">Documentation * on elastic.co * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/StoredScript.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/StoredScript.java index e4bb2de82..09da15815 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/StoredScript.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/StoredScript.java @@ -81,7 +81,8 @@ public static StoredScript of(Function> fn) } /** - * Required - Specifies the language the script is written in. + * Required - The language the script is written in. For search templates, use + * mustache. *

* API name: {@code lang} */ @@ -97,7 +98,8 @@ public final Map options() { } /** - * Required - The script source. + * Required - The script source. For search templates, an object containing the + * search template. *

* API name: {@code source} */ @@ -155,7 +157,8 @@ public static class Builder extends WithJsonObjectBuilderBase implement private String source; /** - * Required - Specifies the language the script is written in. + * Required - The language the script is written in. For search templates, use + * mustache. *

* API name: {@code lang} */ @@ -165,7 +168,8 @@ public final Builder lang(String value) { this.lang = value; return this; } /** - * Required - Specifies the language the script is written in. + * Required - The language the script is written in. For search templates, use + * mustache. *

* API name: {@code lang} */ @@ -195,7 +199,8 @@ public final Builder options(String key, String value) { } /** - * Required - The script source. + * Required - The script source. For search templates, an object containing the + * search template. *

* API name: {@code source} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/VersionType.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/VersionType.java index ca05b9a43..d14251d25 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/VersionType.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/VersionType.java @@ -52,19 +52,23 @@ public enum VersionType implements JsonEnum { Internal("internal"), /** - * Only index the document if the given version is strictly higher than the + * Only index the document if the specified version is strictly higher than the * version of the stored document or if there is no existing document. */ External("external"), /** - * Only index the document if the given version is equal or higher than the - * version of the stored document or if there is no existing document. Note: the - * external_gte version type is meant for special use cases and should be used - * with care. If used incorrectly, it can result in loss of data. + * Only index the document if the specified version is equal or higher than the + * version of the stored document or if there is no existing document. NOTE: The + * external_gte version type is meant for special use cases and + * should be used with care. If used incorrectly, it can result in loss of data. */ ExternalGte("external_gte"), + /** + * This option is deprecated because it can cause primary and replica shards to + * diverge. 
+ */ Force("force"), ; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/WriteResponseBase.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/WriteResponseBase.java index 4cbff9266..d64ca6ce9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/WriteResponseBase.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/WriteResponseBase.java @@ -96,20 +96,26 @@ protected WriteResponseBase(AbstractBuilder builder) { } /** - * Required - API name: {@code _id} + * Required - The unique identifier for the added document. + *

+ * API name: {@code _id} */ public final String id() { return this.id; } /** - * Required - API name: {@code _index} + * Required - The name of the index the document was added to. + *

+ * API name: {@code _index} */ public final String index() { return this.index; } /** + * The primary term assigned to the document for the indexing operation. + *

* API name: {@code _primary_term} */ @Nullable @@ -118,13 +124,20 @@ public final Long primaryTerm() { } /** - * Required - API name: {@code result} + * Required - The result of the indexing operation: created or + * updated. + *

+ * API name: {@code result} */ public final Result result() { return this.result; } /** + * The sequence number assigned to the document for the indexing operation. + * Sequence numbers are used to ensure an older version of a document doesn't + * overwrite a newer version. + *

* API name: {@code _seq_no} */ @Nullable @@ -133,14 +146,19 @@ public final Long seqNo() { } /** - * Required - API name: {@code _shards} + * Required - Information about the replication process of the operation. + *

+ * API name: {@code _shards} */ public final ShardStatistics shards() { return this.shards; } /** - * Required - API name: {@code _version} + * Required - The document version, which is incremented each time the document + * is updated. + *

+ * API name: {@code _version} */ public final long version() { return this.version; @@ -225,7 +243,9 @@ public abstract static class AbstractBuilder + * API name: {@code _id} */ public final BuilderT id(String value) { this.id = value; @@ -233,7 +253,9 @@ public final BuilderT id(String value) { } /** - * Required - API name: {@code _index} + * Required - The name of the index the document was added to. + *

+ * API name: {@code _index} */ public final BuilderT index(String value) { this.index = value; @@ -241,6 +263,8 @@ public final BuilderT index(String value) { } /** + * The primary term assigned to the document for the indexing operation. + *

* API name: {@code _primary_term} */ public final BuilderT primaryTerm(@Nullable Long value) { @@ -249,7 +273,10 @@ public final BuilderT primaryTerm(@Nullable Long value) { } /** - * Required - API name: {@code result} + * Required - The result of the indexing operation: created or + * updated. + *

+ * API name: {@code result} */ public final BuilderT result(Result value) { this.result = value; @@ -257,6 +284,10 @@ public final BuilderT result(Result value) { } /** + * The sequence number assigned to the document for the indexing operation. + * Sequence numbers are used to ensure an older version of a document doesn't + * overwrite a newer version. + *

* API name: {@code _seq_no} */ public final BuilderT seqNo(@Nullable Long value) { @@ -265,7 +296,9 @@ public final BuilderT seqNo(@Nullable Long value) { } /** - * Required - API name: {@code _shards} + * Required - Information about the replication process of the operation. + *

+ * API name: {@code _shards} */ public final BuilderT shards(ShardStatistics value) { this.shards = value; @@ -273,14 +306,19 @@ public final BuilderT shards(ShardStatistics value) { } /** - * Required - API name: {@code _shards} + * Required - Information about the replication process of the operation. + *

+ * API name: {@code _shards} */ public final BuilderT shards(Function> fn) { return this.shards(fn.apply(new ShardStatistics.Builder()).build()); } /** - * Required - API name: {@code _version} + * Required - The document version, which is incremented each time the document + * is updated. + *

+ * API name: {@code _version} */ public final BuilderT version(long value) { this.version = value; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/analysis/Normalizer.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/analysis/Normalizer.java index 899363eb8..9112a7510 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/analysis/Normalizer.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/analysis/Normalizer.java @@ -57,7 +57,7 @@ /** * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-normalizers.html">Documentation * on elastic.co * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/CountedKeywordProperty.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/CountedKeywordProperty.java new file mode 100644 index 000000000..07003d858 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/CountedKeywordProperty.java @@ -0,0 +1,157 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch._types.mapping; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: _types.mapping.CountedKeywordProperty + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class CountedKeywordProperty extends PropertyBase implements PropertyVariant { + @Nullable + private final Boolean index; + + // --------------------------------------------------------------------------------------------- + + private CountedKeywordProperty(Builder builder) { + super(builder); + + this.index = builder.index; + + } + + public static CountedKeywordProperty of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Property variant kind. 
+ */ + @Override + public Property.Kind _propertyKind() { + return Property.Kind.CountedKeyword; + } + + /** + * API name: {@code index} + */ + @Nullable + public final Boolean index() { + return this.index; + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.write("type", "counted_keyword"); + super.serializeInternal(generator, mapper); + if (this.index != null) { + generator.writeKey("index"); + generator.write(this.index); + + } + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link CountedKeywordProperty}. + */ + + public static class Builder extends PropertyBase.AbstractBuilder + implements + ObjectBuilder { + @Nullable + private Boolean index; + + /** + * API name: {@code index} + */ + public final Builder index(@Nullable Boolean value) { + this.index = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link CountedKeywordProperty}. + * + * @throws NullPointerException + * if some of the required fields are null. 
+ */ + public CountedKeywordProperty build() { + _checkSingleUse(); + + return new CountedKeywordProperty(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link CountedKeywordProperty} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, CountedKeywordProperty::setupCountedKeywordPropertyDeserializer); + + protected static void setupCountedKeywordPropertyDeserializer( + ObjectDeserializer op) { + PropertyBase.setupPropertyBaseDeserializer(op); + op.add(Builder::index, JsonpDeserializer.booleanDeserializer(), "index"); + + op.ignore("type"); + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/FieldType.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/FieldType.java index 1e390159f..ce69d5d9f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/FieldType.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/FieldType.java @@ -127,6 +127,8 @@ public enum FieldType implements JsonEnum { ConstantKeyword("constant_keyword"), + CountedKeyword("counted_keyword"), + AggregateMetricDouble("aggregate_metric_double"), DenseVector("dense_vector"), diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/GeoShapeProperty.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/GeoShapeProperty.java index 10465f7e2..4ecbb82a1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/GeoShapeProperty.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/GeoShapeProperty.java @@ -53,7 +53,7 @@ * searching with arbitrary geo shapes such as rectangles and polygons. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html">Documentation * on elastic.co * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/Property.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/Property.java index 7c93e998b..ac97dfac2 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/Property.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/Property.java @@ -84,6 +84,8 @@ public enum Kind implements JsonEnum { ConstantKeyword("constant_keyword"), + CountedKeyword("counted_keyword"), + DateNanos("date_nanos"), Date("date"), @@ -336,6 +338,24 @@ public ConstantKeywordProperty constantKeyword() { return TaggedUnionUtils.get(this, Kind.ConstantKeyword); } + /** + * Is this variant instance of kind {@code counted_keyword}? + */ + public boolean isCountedKeyword() { + return _kind == Kind.CountedKeyword; + } + + /** + * Get the {@code counted_keyword} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code counted_keyword} + * kind. + */ + public CountedKeywordProperty countedKeyword() { + return TaggedUnionUtils.get(this, Kind.CountedKeyword); + } + /** * Is this variant instance of kind {@code date_nanos}? 
*/ @@ -1201,6 +1221,17 @@ public ObjectBuilder constantKeyword( return this.constantKeyword(fn.apply(new ConstantKeywordProperty.Builder()).build()); } + public ObjectBuilder countedKeyword(CountedKeywordProperty v) { + this._kind = Kind.CountedKeyword; + this._value = v; + return this; + } + + public ObjectBuilder countedKeyword( + Function> fn) { + return this.countedKeyword(fn.apply(new CountedKeywordProperty.Builder()).build()); + } + public ObjectBuilder dateNanos(DateNanosProperty v) { this._kind = Kind.DateNanos; this._value = v; @@ -1705,6 +1736,7 @@ protected static void setupPropertyDeserializer(ObjectDeserializer op) op.add(Builder::byte_, ByteNumberProperty._DESERIALIZER, "byte"); op.add(Builder::completion, CompletionProperty._DESERIALIZER, "completion"); op.add(Builder::constantKeyword, ConstantKeywordProperty._DESERIALIZER, "constant_keyword"); + op.add(Builder::countedKeyword, CountedKeywordProperty._DESERIALIZER, "counted_keyword"); op.add(Builder::dateNanos, DateNanosProperty._DESERIALIZER, "date_nanos"); op.add(Builder::date, DateProperty._DESERIALIZER, "date"); op.add(Builder::dateRange, DateRangeProperty._DESERIALIZER, "date_range"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/PropertyBase.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/PropertyBase.java index bb366a817..1bfbb0919 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/PropertyBase.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/PropertyBase.java @@ -73,6 +73,9 @@ public abstract class PropertyBase implements JsonpSerializable { private final Map fields; + @Nullable + private final SyntheticSourceKeepEnum syntheticSourceKeep; + // --------------------------------------------------------------------------------------------- protected PropertyBase(AbstractBuilder builder) { @@ -82,6 +85,7 @@ protected PropertyBase(AbstractBuilder builder) 
{ this.ignoreAbove = builder.ignoreAbove; this.dynamic = builder.dynamic; this.fields = ApiTypeHelper.unmodifiable(builder.fields); + this.syntheticSourceKeep = builder.syntheticSourceKeep; } @@ -124,6 +128,14 @@ public final Map fields() { return this.fields; } + /** + * API name: {@code synthetic_source_keep} + */ + @Nullable + public final SyntheticSourceKeepEnum syntheticSourceKeep() { + return this.syntheticSourceKeep; + } + /** * Serialize this object to JSON. */ @@ -177,6 +189,10 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeEnd(); } + if (this.syntheticSourceKeep != null) { + generator.writeKey("synthetic_source_keep"); + this.syntheticSourceKeep.serialize(generator, mapper); + } } @@ -203,6 +219,9 @@ public abstract static class AbstractBuilder fields; + @Nullable + private SyntheticSourceKeepEnum syntheticSourceKeep; + /** * Metadata about the field. *

@@ -301,6 +320,14 @@ public final BuilderT fields(String key, Function> void setupProperty op.add(AbstractBuilder::ignoreAbove, JsonpDeserializer.integerDeserializer(), "ignore_above"); op.add(AbstractBuilder::dynamic, DynamicMapping._DESERIALIZER, "dynamic"); op.add(AbstractBuilder::fields, JsonpDeserializer.stringMapDeserializer(Property._DESERIALIZER), "fields"); + op.add(AbstractBuilder::syntheticSourceKeep, SyntheticSourceKeepEnum._DESERIALIZER, "synthetic_source_keep"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/PropertyBuilders.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/PropertyBuilders.java index 1b4b31212..e3ba7beab 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/PropertyBuilders.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/PropertyBuilders.java @@ -154,6 +154,25 @@ public static Property constantKeyword( return builder.build(); } + /** + * Creates a builder for the {@link CountedKeywordProperty counted_keyword} + * {@code Property} variant. + */ + public static CountedKeywordProperty.Builder countedKeyword() { + return new CountedKeywordProperty.Builder(); + } + + /** + * Creates a Property of the {@link CountedKeywordProperty counted_keyword} + * {@code Property} variant. + */ + public static Property countedKeyword( + Function> fn) { + Property.Builder builder = new Property.Builder(); + builder.countedKeyword(fn.apply(new CountedKeywordProperty.Builder()).build()); + return builder.build(); + } + /** * Creates a builder for the {@link DateNanosProperty date_nanos} * {@code Property} variant. 
diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/ShapeProperty.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/ShapeProperty.java index 3df6e636c..96cc9897f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/ShapeProperty.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/ShapeProperty.java @@ -54,7 +54,7 @@ * polygons. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/shape.html">Documentation * on elastic.co * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/SyntheticSourceKeepEnum.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/SyntheticSourceKeepEnum.java new file mode 100644 index 000000000..052e0aa1a --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/SyntheticSourceKeepEnum.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch._types.mapping; + +import co.elastic.clients.json.JsonEnum; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public enum SyntheticSourceKeepEnum implements JsonEnum { + /** + * Synthetic source diverges from the original source (default) + */ + None("none"), + + /** + * Arrays of the corresponding field or object preserve the original element + * ordering and duplicate elements. The synthetic source fragment for such + * arrays is not guaranteed to match the original source exactly, e.g. array [1, + * 2, [5], [[4, [3]]], 5] may appear as-is or in an equivalent format like [1, + * 2, 5, 4, 3, 5]. The exact format may change in the future, in an effort to + * reduce the storage overhead of this option. + */ + Arrays("arrays"), + + /** + * The source for both singleton instances and arrays of the corresponding field + * or object gets recorded. When applied to objects, the source of all + * sub-objects and sub-fields gets captured. Furthermore, the original source of + * arrays gets captured and appears in synthetic source with no modifications. 
+ */ + All("all"), + + ; + + private final String jsonValue; + + SyntheticSourceKeepEnum(String jsonValue) { + this.jsonValue = jsonValue; + } + + public String jsonValue() { + return this.jsonValue; + } + + public static final JsonEnum.Deserializer _DESERIALIZER = new JsonEnum.Deserializer<>( + SyntheticSourceKeepEnum.values()); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/FieldAndFormat.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/FieldAndFormat.java index bba490ce5..69ee48150 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/FieldAndFormat.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/FieldAndFormat.java @@ -86,7 +86,7 @@ public static FieldAndFormat of(Function> } /** - * Required - Wildcard pattern. The request returns values for field names + * Required - A wildcard pattern. The request returns values for field names * matching this pattern. *

* API name: {@code field} @@ -96,7 +96,7 @@ public final String field() { } /** - * Format in which the values are returned. + * The format in which the values are returned. *

* API name: {@code format} */ @@ -161,7 +161,7 @@ public static class Builder extends WithJsonObjectBuilderBase implement private Boolean includeUnmapped; /** - * Required - Wildcard pattern. The request returns values for field names + * Required - A wildcard pattern. The request returns values for field names * matching this pattern. *

* API name: {@code field} @@ -172,7 +172,7 @@ public final Builder field(String value) { } /** - * Format in which the values are returned. + * The format in which the values are returned. *

* API name: {@code format} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/Like.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/Like.java index 06082f897..d7b5050f9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/Like.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/Like.java @@ -60,7 +60,7 @@ * the text. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html#_document_input_parameters">Documentation * on elastic.co * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/async_search/ElasticsearchAsyncSearchAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/async_search/ElasticsearchAsyncSearchAsyncClient.java index 8c01cb403..d1d87ecf8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/async_search/ElasticsearchAsyncSearchAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/async_search/ElasticsearchAsyncSearchAsyncClient.java @@ -81,7 +81,7 @@ public ElasticsearchAsyncSearchAsyncClient withTransportOptions(@Nullable Transp * have the cancel_task cluster privilege. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -105,7 +105,7 @@ public CompletableFuture delete(DeleteAsyncSearchRequ * a function that initializes a builder to create the * {@link DeleteAsyncSearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -125,7 +125,7 @@ public final CompletableFuture delete( * it. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -151,7 +151,7 @@ public CompletableFuture> get(GetA * a function that initializes a builder to create the * {@link GetAsyncSearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -170,7 +170,7 @@ public final CompletableFuture> ge * it. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -196,7 +196,7 @@ public CompletableFuture> get(GetA * a function that initializes a builder to create the * {@link GetAsyncSearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -216,7 +216,7 @@ public final CompletableFuture> ge * monitoring_user role. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -239,7 +239,7 @@ public CompletableFuture status(AsyncSearchStatusRequ * a function that initializes a builder to create the * {@link AsyncSearchStatusRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -267,7 +267,7 @@ public final CompletableFuture status( * setting. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -302,7 +302,7 @@ public CompletableFuture> submit(SubmitReq * a function that initializes a builder to create the * {@link SubmitRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -328,7 +328,7 @@ public final CompletableFuture> submit( * setting. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -362,7 +362,7 @@ public CompletableFuture> submit(SubmitReq * a function that initializes a builder to create the * {@link SubmitRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/async_search/ElasticsearchAsyncSearchClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/async_search/ElasticsearchAsyncSearchClient.java index e71c8284e..5bcf84d6d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/async_search/ElasticsearchAsyncSearchClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/async_search/ElasticsearchAsyncSearchClient.java @@ -80,7 +80,7 @@ public ElasticsearchAsyncSearchClient withTransportOptions(@Nullable TransportOp * have the cancel_task cluster privilege. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -105,7 +105,7 @@ public DeleteAsyncSearchResponse delete(DeleteAsyncSearchRequest request) * a function that initializes a builder to create the * {@link DeleteAsyncSearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -126,7 +126,7 @@ public final DeleteAsyncSearchResponse delete( * it. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -152,7 +152,7 @@ public GetAsyncSearchResponse get(GetAsyncSearchRequest r * a function that initializes a builder to create the * {@link GetAsyncSearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -171,7 +171,7 @@ public final GetAsyncSearchResponse get( * it. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -197,7 +197,7 @@ public GetAsyncSearchResponse get(GetAsyncSearchRequest r * a function that initializes a builder to create the * {@link GetAsyncSearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -218,7 +218,7 @@ public final GetAsyncSearchResponse get( * monitoring_user role. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -242,7 +242,7 @@ public AsyncSearchStatusResponse status(AsyncSearchStatusRequest request) * a function that initializes a builder to create the * {@link AsyncSearchStatusRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -271,7 +271,7 @@ public final AsyncSearchStatusResponse status( * setting. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -306,7 +306,7 @@ public SubmitResponse submit(SubmitRequest request, Class * a function that initializes a builder to create the * {@link SubmitRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -333,7 +333,7 @@ public final SubmitResponse submit( * setting. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ @@ -368,7 +368,7 @@ public SubmitResponse submit(SubmitRequest request, Type * a function that initializes a builder to create the * {@link SubmitRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/async_search/SubmitRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/async_search/SubmitRequest.java index d38c35ab7..a0114bcbf 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/async_search/SubmitRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/async_search/SubmitRequest.java @@ -164,6 +164,9 @@ public class SubmitRequest extends RequestBase implements JsonpSerializable { private final List> indicesBoost; + @Nullable + private final Time keepAlive; + @Nullable private final Boolean keepOnCompletion; @@ -275,6 +278,7 @@ private SubmitRequest(Builder builder) { this.ignoreUnavailable = builder.ignoreUnavailable; this.index = ApiTypeHelper.unmodifiable(builder.index); this.indicesBoost = ApiTypeHelper.unmodifiable(builder.indicesBoost); + this.keepAlive = builder.keepAlive; this.keepOnCompletion = builder.keepOnCompletion; this.knn = ApiTypeHelper.unmodifiable(builder.knn); this.lenient = builder.lenient; @@ -538,6 +542,17 @@ public final List> indicesBoost() { return this.indicesBoost; } + /** + * Specifies how long the async search needs to be available. Ongoing async + * searches and any saved search results are deleted after this period. + *

+ * API name: {@code keep_alive} + */ + @Nullable + public final Time keepAlive() { + return this.keepAlive; + } + /** * If true, results are stored for later retrieval when the search * completes within the wait_for_completion_timeout. @@ -1184,6 +1199,9 @@ public static class Builder extends RequestBase.AbstractBuilder impleme @Nullable private List> indicesBoost; + @Nullable + private Time keepAlive; + @Nullable private Boolean keepOnCompletion; @@ -1666,6 +1684,27 @@ public final Builder indicesBoost(Map value, Map return this; } + /** + * Specifies how long the async search needs to be available. Ongoing async + * searches and any saved search results are deleted after this period. + *

+ * API name: {@code keep_alive} + */ + public final Builder keepAlive(@Nullable Time value) { + this.keepAlive = value; + return this; + } + + /** + * Specifies how long the async search needs to be available. Ongoing async + * searches and any saved search results are deleted after this period. + *

+ * API name: {@code keep_alive} + */ + public final Builder keepAlive(Function> fn) { + return this.keepAlive(fn.apply(new Time.Builder()).build()); + } + /** * If true, results are stored for later retrieval when the search * completes within the wait_for_completion_timeout. @@ -2473,6 +2512,9 @@ protected static void setupSubmitRequestDeserializer(ObjectDeserializerDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy">Documentation * on elastic.co */ @@ -100,7 +100,7 @@ public CompletableFuture deleteAutoscalingPolic * a function that initializes a builder to create the * {@link DeleteAutoscalingPolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy">Documentation * on elastic.co */ @@ -136,7 +136,7 @@ public final CompletableFuture deleteAutoscalin * make autoscaling decisions. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity">Documentation * on elastic.co */ @@ -176,7 +176,7 @@ public CompletableFuture getAutoscalingCapacity( * a function that initializes a builder to create the * {@link GetAutoscalingCapacityRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity">Documentation * on elastic.co */ @@ -210,7 +210,7 @@ public final CompletableFuture getAutoscalingCap * make autoscaling decisions. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity">Documentation * on elastic.co */ @@ -229,7 +229,7 @@ public CompletableFuture getAutoscalingCapacity( * supported. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity">Documentation * on elastic.co */ @@ -251,7 +251,7 @@ public CompletableFuture getAutoscalingPolicy(GetA * a function that initializes a builder to create the * {@link GetAutoscalingPolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity">Documentation * on elastic.co */ @@ -270,7 +270,7 @@ public final CompletableFuture getAutoscalingPolic * supported. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy">Documentation * on elastic.co */ @@ -292,7 +292,7 @@ public CompletableFuture putAutoscalingPolicy(PutA * a function that initializes a builder to create the * {@link PutAutoscalingPolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/autoscaling/ElasticsearchAutoscalingClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/autoscaling/ElasticsearchAutoscalingClient.java index ff756bbee..14b903122 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/autoscaling/ElasticsearchAutoscalingClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/autoscaling/ElasticsearchAutoscalingClient.java @@ -76,7 +76,7 @@ public ElasticsearchAutoscalingClient withTransportOptions(@Nullable TransportOp * supported. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy">Documentation * on elastic.co */ @@ -99,7 +99,7 @@ public DeleteAutoscalingPolicyResponse deleteAutoscalingPolicy(DeleteAutoscaling * a function that initializes a builder to create the * {@link DeleteAutoscalingPolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy">Documentation * on elastic.co */ @@ -136,7 +136,7 @@ public final DeleteAutoscalingPolicyResponse deleteAutoscalingPolicy( * make autoscaling decisions. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity">Documentation * on elastic.co */ @@ -176,7 +176,7 @@ public GetAutoscalingCapacityResponse getAutoscalingCapacity(GetAutoscalingCapac * a function that initializes a builder to create the * {@link GetAutoscalingCapacityRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity">Documentation * on elastic.co */ @@ -211,7 +211,7 @@ public final GetAutoscalingCapacityResponse getAutoscalingCapacity( * make autoscaling decisions. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity">Documentation * on elastic.co */ @@ -230,7 +230,7 @@ public GetAutoscalingCapacityResponse getAutoscalingCapacity() throws IOExceptio * supported. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity">Documentation * on elastic.co */ @@ -253,7 +253,7 @@ public GetAutoscalingPolicyResponse getAutoscalingPolicy(GetAutoscalingPolicyReq * a function that initializes a builder to create the * {@link GetAutoscalingPolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity">Documentation * on elastic.co */ @@ -273,7 +273,7 @@ public final GetAutoscalingPolicyResponse getAutoscalingPolicy( * supported. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy">Documentation * on elastic.co */ @@ -296,7 +296,7 @@ public PutAutoscalingPolicyResponse putAutoscalingPolicy(PutAutoscalingPolicyReq * a function that initializes a builder to create the * {@link PutAutoscalingPolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/AliasesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/AliasesRequest.java index ca6aeeee2..f5fdb5970 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/AliasesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/AliasesRequest.java @@ -58,12 +58,14 @@ // typedef: cat.aliases.Request /** - * Get aliases. Retrieves the cluster’s index aliases, including filter and - * routing information. The API does not return data stream aliases. + * Get aliases. *

- * CAT APIs are only intended for human consumption using the command line or - * the Kibana console. They are not intended for use by applications. For - * application consumption, use the aliases API. + * Get the cluster's index aliases, including filter and routing information. + * This API does not return data stream aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or the Kibana console. They are not intended for use by applications. + * For application consumption, use the aliases API. * * @see API * specification @@ -92,8 +94,10 @@ public static AliasesRequest of(Function> } /** - * Whether to expand wildcard expression to concrete indices that are open, - * closed or both. + * The type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match + * hidden data streams. It supports comma-separated values, such as + * open,hidden. *

* API name: {@code expand_wildcards} */ @@ -102,7 +106,10 @@ public final List expandWildcards() { } /** - * Period to wait for a connection to the master node. + * The period to wait for a connection to the master node. If the master node is + * not available before the timeout expires, the request fails and returns an + * error. To indicated that the request should never timeout, you can set it to + * -1. *

* API name: {@code master_timeout} */ @@ -141,8 +148,10 @@ public static class Builder extends CatRequestBase.AbstractBuilder private List name; /** - * Whether to expand wildcard expression to concrete indices that are open, - * closed or both. + * The type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match + * hidden data streams. It supports comma-separated values, such as + * open,hidden. *

* API name: {@code expand_wildcards} *

@@ -154,8 +163,10 @@ public final Builder expandWildcards(List list) { } /** - * Whether to expand wildcard expression to concrete indices that are open, - * closed or both. + * The type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match + * hidden data streams. It supports comma-separated values, such as + * open,hidden. *

* API name: {@code expand_wildcards} *

@@ -167,7 +178,10 @@ public final Builder expandWildcards(ExpandWildcard value, ExpandWildcard... val } /** - * Period to wait for a connection to the master node. + * The period to wait for a connection to the master node. If the master node is + * not available before the timeout expires, the request fails and returns an + * error. To indicated that the request should never timeout, you can set it to + * -1. *

* API name: {@code master_timeout} */ @@ -177,7 +191,10 @@ public final Builder masterTimeout(@Nullable Time value) { } /** - * Period to wait for a connection to the master node. + * The period to wait for a connection to the master node. If the master node is + * not available before the timeout expires, the request fails and returns an + * error. To indicated that the request should never timeout, you can set it to + * -1. *

* API name: {@code master_timeout} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/AllocationRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/AllocationRequest.java index 870e98ab1..686661d29 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/AllocationRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/AllocationRequest.java @@ -59,10 +59,13 @@ // typedef: cat.allocation.Request /** - * Get shard allocation information. Get a snapshot of the number of shards - * allocated to each data node and their disk space. IMPORTANT: cat APIs are - * only intended for human consumption using the command line or Kibana console. - * They are not intended for use by applications. + * Get shard allocation information. + *

+ * Get a snapshot of the number of shards allocated to each data node and their + * disk space. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. * * @see API * specification @@ -130,8 +133,8 @@ public final Time masterTimeout() { } /** - * Comma-separated list of node identifiers or names used to limit the returned - * information. + * A comma-separated list of node identifiers or names used to limit the + * returned information. *

* API name: {@code node_id} */ @@ -204,8 +207,8 @@ public final Builder masterTimeout(Function> f } /** - * Comma-separated list of node identifiers or names used to limit the returned - * information. + * A comma-separated list of node identifiers or names used to limit the + * returned information. *

* API name: {@code node_id} *

@@ -217,8 +220,8 @@ public final Builder nodeId(List list) { } /** - * Comma-separated list of node identifiers or names used to limit the returned - * information. + * A comma-separated list of node identifiers or names used to limit the + * returned information. *

* API name: {@code node_id} *

diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ComponentTemplatesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ComponentTemplatesRequest.java index 556e3a40b..0e7b5bfff 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ComponentTemplatesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ComponentTemplatesRequest.java @@ -55,12 +55,14 @@ // typedef: cat.component_templates.Request /** - * Get component templates. Returns information about component templates in a - * cluster. Component templates are building blocks for constructing index - * templates that specify index mappings, settings, and aliases. + * Get component templates. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get information about component templates in a cluster. Component templates + * are building blocks for constructing index templates that specify index + * mappings, settings, and aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the get component template API. * * @see API @@ -106,7 +108,7 @@ public final Boolean local() { } /** - * Period to wait for a connection to the master node. + * The period to wait for a connection to the master node. *

* API name: {@code master_timeout} */ @@ -116,8 +118,8 @@ public final Time masterTimeout() { } /** - * The name of the component template. Accepts wildcard expressions. If omitted, - * all component templates are returned. + * The name of the component template. It accepts wildcard expressions. If it is + * omitted, all component templates are returned. *

* API name: {@code name} */ @@ -159,7 +161,7 @@ public final Builder local(@Nullable Boolean value) { } /** - * Period to wait for a connection to the master node. + * The period to wait for a connection to the master node. *

* API name: {@code master_timeout} */ @@ -169,7 +171,7 @@ public final Builder masterTimeout(@Nullable Time value) { } /** - * Period to wait for a connection to the master node. + * The period to wait for a connection to the master node. *

* API name: {@code master_timeout} */ @@ -178,8 +180,8 @@ public final Builder masterTimeout(Function> f } /** - * The name of the component template. Accepts wildcard expressions. If omitted, - * all component templates are returned. + * The name of the component template. It accepts wildcard expressions. If it is + * omitted, all component templates are returned. *

* API name: {@code name} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/CountRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/CountRequest.java index a1d6203c5..75501d8c7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/CountRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/CountRequest.java @@ -56,13 +56,14 @@ // typedef: cat.count.Request /** - * Get a document count. Provides quick access to a document count for a data - * stream, an index, or an entire cluster. The document count only includes live - * documents, not deleted documents which have not yet been removed by the merge - * process. + * Get a document count. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get quick access to a document count for a data stream, an index, or an + * entire cluster. The document count only includes live documents, not deleted + * documents which have not yet been removed by the merge process. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the count API. * * @see API @@ -85,9 +86,10 @@ public static CountRequest of(Function> fn) } /** - * Comma-separated list of data streams, indices, and aliases used to limit the - * request. Supports wildcards (*). To target all data streams and - * indices, omit this parameter or use * or _all. + * A comma-separated list of data streams, indices, and aliases used to limit + * the request. It supports wildcards (*). To target all data + * streams and indices, omit this parameter or use * or + * _all. *

* API name: {@code index} */ @@ -106,9 +108,10 @@ public static class Builder extends CatRequestBase.AbstractBuilder impl private List index; /** - * Comma-separated list of data streams, indices, and aliases used to limit the - * request. Supports wildcards (*). To target all data streams and - * indices, omit this parameter or use * or _all. + * A comma-separated list of data streams, indices, and aliases used to limit + * the request. It supports wildcards (*). To target all data + * streams and indices, omit this parameter or use * or + * _all. *

* API name: {@code index} *

@@ -120,9 +123,10 @@ public final Builder index(List list) { } /** - * Comma-separated list of data streams, indices, and aliases used to limit the - * request. Supports wildcards (*). To target all data streams and - * indices, omit this parameter or use * or _all. + * A comma-separated list of data streams, indices, and aliases used to limit + * the request. It supports wildcards (*). To target all data + * streams and indices, omit this parameter or use * or + * _all. *

* API name: {@code index} *

diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatAsyncClient.java index c10250bbb..3057e2c41 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatAsyncClient.java @@ -67,15 +67,17 @@ public ElasticsearchCatAsyncClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: cat.aliases /** - * Get aliases. Retrieves the cluster’s index aliases, including filter and - * routing information. The API does not return data stream aliases. + * Get aliases. *

- * CAT APIs are only intended for human consumption using the command line or - * the Kibana console. They are not intended for use by applications. For - * application consumption, use the aliases API. + * Get the cluster's index aliases, including filter and routing information. + * This API does not return data stream aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or the Kibana console. They are not intended for use by applications. + * For application consumption, use the aliases API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases">Documentation * on elastic.co */ @@ -87,18 +89,20 @@ public CompletableFuture aliases(AliasesRequest request) { } /** - * Get aliases. Retrieves the cluster’s index aliases, including filter and - * routing information. The API does not return data stream aliases. + * Get aliases. *

- * CAT APIs are only intended for human consumption using the command line or - * the Kibana console. They are not intended for use by applications. For - * application consumption, use the aliases API. + * Get the cluster's index aliases, including filter and routing information. + * This API does not return data stream aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or the Kibana console. They are not intended for use by applications. + * For application consumption, use the aliases API. * * @param fn * a function that initializes a builder to create the * {@link AliasesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases">Documentation * on elastic.co */ @@ -108,15 +112,17 @@ public final CompletableFuture aliases( } /** - * Get aliases. Retrieves the cluster’s index aliases, including filter and - * routing information. The API does not return data stream aliases. + * Get aliases. *

- * CAT APIs are only intended for human consumption using the command line or - * the Kibana console. They are not intended for use by applications. For - * application consumption, use the aliases API. + * Get the cluster's index aliases, including filter and routing information. + * This API does not return data stream aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or the Kibana console. They are not intended for use by applications. + * For application consumption, use the aliases API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases">Documentation * on elastic.co */ @@ -128,13 +134,16 @@ public CompletableFuture aliases() { // ----- Endpoint: cat.allocation /** - * Get shard allocation information. Get a snapshot of the number of shards - * allocated to each data node and their disk space. IMPORTANT: cat APIs are - * only intended for human consumption using the command line or Kibana console. - * They are not intended for use by applications. + * Get shard allocation information. + *

+ * Get a snapshot of the number of shards allocated to each data node and their + * disk space. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation">Documentation * on elastic.co */ @@ -146,16 +155,19 @@ public CompletableFuture allocation(AllocationRequest reques } /** - * Get shard allocation information. Get a snapshot of the number of shards - * allocated to each data node and their disk space. IMPORTANT: cat APIs are - * only intended for human consumption using the command line or Kibana console. - * They are not intended for use by applications. + * Get shard allocation information. + *

+ * Get a snapshot of the number of shards allocated to each data node and their + * disk space. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. * * @param fn * a function that initializes a builder to create the * {@link AllocationRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation">Documentation * on elastic.co */ @@ -165,13 +177,16 @@ public final CompletableFuture allocation( } /** - * Get shard allocation information. Get a snapshot of the number of shards - * allocated to each data node and their disk space. IMPORTANT: cat APIs are - * only intended for human consumption using the command line or Kibana console. - * They are not intended for use by applications. + * Get shard allocation information. + *

+ * Get a snapshot of the number of shards allocated to each data node and their + * disk space. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation">Documentation * on elastic.co */ @@ -183,16 +198,18 @@ public CompletableFuture allocation() { // ----- Endpoint: cat.component_templates /** - * Get component templates. Returns information about component templates in a - * cluster. Component templates are building blocks for constructing index - * templates that specify index mappings, settings, and aliases. + * Get component templates. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get information about component templates in a cluster. Component templates + * are building blocks for constructing index templates that specify index + * mappings, settings, and aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the get component template API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates">Documentation * on elastic.co */ @@ -204,19 +221,21 @@ public CompletableFuture componentTemplates(Componen } /** - * Get component templates. Returns information about component templates in a - * cluster. Component templates are building blocks for constructing index - * templates that specify index mappings, settings, and aliases. + * Get component templates. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get information about component templates in a cluster. Component templates + * are building blocks for constructing index templates that specify index + * mappings, settings, and aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the get component template API. * * @param fn * a function that initializes a builder to create the * {@link ComponentTemplatesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates">Documentation * on elastic.co */ @@ -226,16 +245,18 @@ public final CompletableFuture componentTemplates( } /** - * Get component templates. Returns information about component templates in a - * cluster. Component templates are building blocks for constructing index - * templates that specify index mappings, settings, and aliases. + * Get component templates. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get information about component templates in a cluster. Component templates + * are building blocks for constructing index templates that specify index + * mappings, settings, and aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the get component template API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates">Documentation * on elastic.co */ @@ -247,17 +268,18 @@ public CompletableFuture componentTemplates() { // ----- Endpoint: cat.count /** - * Get a document count. Provides quick access to a document count for a data - * stream, an index, or an entire cluster. The document count only includes live - * documents, not deleted documents which have not yet been removed by the merge - * process. + * Get a document count. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get quick access to a document count for a data stream, an index, or an + * entire cluster. The document count only includes live documents, not deleted + * documents which have not yet been removed by the merge process. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the count API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count">Documentation * on elastic.co */ @@ -269,20 +291,21 @@ public CompletableFuture count(CountRequest request) { } /** - * Get a document count. Provides quick access to a document count for a data - * stream, an index, or an entire cluster. The document count only includes live - * documents, not deleted documents which have not yet been removed by the merge - * process. + * Get a document count. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get quick access to a document count for a data stream, an index, or an + * entire cluster. The document count only includes live documents, not deleted + * documents which have not yet been removed by the merge process. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the count API. * * @param fn * a function that initializes a builder to create the * {@link CountRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count">Documentation * on elastic.co */ @@ -292,17 +315,18 @@ public final CompletableFuture count( } /** - * Get a document count. Provides quick access to a document count for a data - * stream, an index, or an entire cluster. The document count only includes live - * documents, not deleted documents which have not yet been removed by the merge - * process. + * Get a document count. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get quick access to a document count for a data stream, an index, or an + * entire cluster. The document count only includes live documents, not deleted + * documents which have not yet been removed by the merge process. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the count API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count">Documentation * on elastic.co */ @@ -314,14 +338,17 @@ public CompletableFuture count() { // ----- Endpoint: cat.fielddata /** - * Get field data cache information. Get the amount of heap memory currently - * used by the field data cache on every data node in the cluster. IMPORTANT: - * cat APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get field data cache information. + *

+ * Get the amount of heap memory currently used by the field data cache on every + * data node in the cluster. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the nodes stats API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata">Documentation * on elastic.co */ @@ -333,17 +360,20 @@ public CompletableFuture fielddata(FielddataRequest request) } /** - * Get field data cache information. Get the amount of heap memory currently - * used by the field data cache on every data node in the cluster. IMPORTANT: - * cat APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get field data cache information. + *

+ * Get the amount of heap memory currently used by the field data cache on every + * data node in the cluster. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the nodes stats API. * * @param fn * a function that initializes a builder to create the * {@link FielddataRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata">Documentation * on elastic.co */ @@ -353,14 +383,17 @@ public final CompletableFuture fielddata( } /** - * Get field data cache information. Get the amount of heap memory currently - * used by the field data cache on every data node in the cluster. IMPORTANT: - * cat APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get field data cache information. + *

+ * Get the amount of heap memory currently used by the field data cache on every + * data node in the cluster. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the nodes stats API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata">Documentation * on elastic.co */ @@ -372,21 +405,22 @@ public CompletableFuture fielddata() { // ----- Endpoint: cat.health /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * cluster health API. This API is often used to check malfunctioning clusters. - * To help you track cluster health alongside log files and alerting systems, - * the API returns timestamps in two formats: HH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Get the cluster health status. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health">Documentation * on elastic.co */ @@ -398,24 +432,25 @@ public CompletableFuture health(HealthRequest request) { } /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * cluster health API. This API is often used to check malfunctioning clusters. - * To help you track cluster health alongside log files and alerting systems, - * the API returns timestamps in two formats: HH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Get the cluster health status. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @param fn * a function that initializes a builder to create the * {@link HealthRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health">Documentation * on elastic.co */ @@ -425,21 +460,22 @@ public final CompletableFuture health( } /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * cluster health API. This API is often used to check malfunctioning clusters. - * To help you track cluster health alongside log files and alerting systems, - * the API returns timestamps in two formats: HH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. 
+ * Get the cluster health status. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health">Documentation * on elastic.co */ @@ -451,10 +487,12 @@ public CompletableFuture health() { // ----- Endpoint: cat.help /** - * Get CAT help. Returns help for the CAT APIs. + * Get CAT help. + *

+ * Get help for the CAT APIs. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat">Documentation * on elastic.co */ public CompletableFuture help() { @@ -464,8 +502,10 @@ public CompletableFuture help() { // ----- Endpoint: cat.indices /** - * Get index information. Returns high-level information about indices in a - * cluster, including backing indices for data streams. + * Get index information. + *

+ * Get high-level information about indices in a cluster, including backing + * indices for data streams. *

* Use this request to get the following information for each index in a * cluster: @@ -487,7 +527,7 @@ public CompletableFuture help() { * application consumption, use an index endpoint. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices">Documentation * on elastic.co */ @@ -499,8 +539,10 @@ public CompletableFuture indices(IndicesRequest request) { } /** - * Get index information. Returns high-level information about indices in a - * cluster, including backing indices for data streams. + * Get index information. + *

+ * Get high-level information about indices in a cluster, including backing + * indices for data streams. *

* Use this request to get the following information for each index in a * cluster: @@ -525,7 +567,7 @@ public CompletableFuture indices(IndicesRequest request) { * a function that initializes a builder to create the * {@link IndicesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices">Documentation * on elastic.co */ @@ -535,8 +577,10 @@ public final CompletableFuture indices( } /** - * Get index information. Returns high-level information about indices in a - * cluster, including backing indices for data streams. + * Get index information. + *

+ * Get high-level information about indices in a cluster, including backing + * indices for data streams. *

* Use this request to get the following information for each index in a * cluster: @@ -558,7 +602,7 @@ public final CompletableFuture indices( * application consumption, use an index endpoint. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices">Documentation * on elastic.co */ @@ -570,14 +614,17 @@ public CompletableFuture indices() { // ----- Endpoint: cat.master /** - * Get master node information. Get information about the master node, including - * the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the nodes - * info API. + * Get master node information. + *

+ * Get information about the master node, including the ID, bound IP address, + * and name. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master">Documentation * on elastic.co */ @@ -589,17 +636,20 @@ public CompletableFuture master(MasterRequest request) { } /** - * Get master node information. Get information about the master node, including - * the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the nodes - * info API. + * Get master node information. + *

+ * Get information about the master node, including the ID, bound IP address, + * and name. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @param fn * a function that initializes a builder to create the * {@link MasterRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master">Documentation * on elastic.co */ @@ -609,14 +659,17 @@ public final CompletableFuture master( } /** - * Get master node information. Get information about the master node, including - * the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the nodes - * info API. + * Get master node information. + *

+ * Get information about the master node, including the ID, bound IP address, + * and name. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master">Documentation * on elastic.co */ @@ -628,15 +681,17 @@ public CompletableFuture master() { // ----- Endpoint: cat.ml_data_frame_analytics /** - * Get data frame analytics jobs. Returns configuration and usage information - * about data frame analytics jobs. + * Get data frame analytics jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get data frame analytics jobs statistics API. + * Get configuration and usage information about data frame analytics jobs. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get data frame analytics jobs statistics + * API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics">Documentation * on elastic.co */ @@ -648,18 +703,20 @@ public CompletableFuture mlDataFrameAnalytics(MlDa } /** - * Get data frame analytics jobs. Returns configuration and usage information - * about data frame analytics jobs. + * Get data frame analytics jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get data frame analytics jobs statistics API. + * Get configuration and usage information about data frame analytics jobs. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get data frame analytics jobs statistics + * API. * * @param fn * a function that initializes a builder to create the * {@link MlDataFrameAnalyticsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics">Documentation * on elastic.co */ @@ -669,15 +726,17 @@ public final CompletableFuture mlDataFrameAnalytic } /** - * Get data frame analytics jobs. Returns configuration and usage information - * about data frame analytics jobs. + * Get data frame analytics jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get data frame analytics jobs statistics API. + * Get configuration and usage information about data frame analytics jobs. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get data frame analytics jobs statistics + * API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics">Documentation * on elastic.co */ @@ -689,18 +748,20 @@ public CompletableFuture mlDataFrameAnalytics() { // ----- Endpoint: cat.ml_datafeeds /** - * Get datafeeds. Returns configuration and usage information about datafeeds. - * This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security - * features are enabled, you must have monitor_ml, - * monitor, manage_ml, or manage cluster - * privileges to use this API. + * Get datafeeds. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get datafeed statistics API. + * Get configuration and usage information about datafeeds. This API returns a + * maximum of 10,000 datafeeds. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get datafeed statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds">Documentation * on elastic.co */ @@ -712,21 +773,23 @@ public CompletableFuture mlDatafeeds(MlDatafeedsRequest req } /** - * Get datafeeds. Returns configuration and usage information about datafeeds. - * This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security - * features are enabled, you must have monitor_ml, - * monitor, manage_ml, or manage cluster - * privileges to use this API. + * Get datafeeds. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get datafeed statistics API. + * Get configuration and usage information about datafeeds. This API returns a + * maximum of 10,000 datafeeds. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get datafeed statistics API. * * @param fn * a function that initializes a builder to create the * {@link MlDatafeedsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds">Documentation * on elastic.co */ @@ -736,18 +799,20 @@ public final CompletableFuture mlDatafeeds( } /** - * Get datafeeds. Returns configuration and usage information about datafeeds. - * This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security - * features are enabled, you must have monitor_ml, - * monitor, manage_ml, or manage cluster - * privileges to use this API. + * Get datafeeds. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get datafeed statistics API. + * Get configuration and usage information about datafeeds. This API returns a + * maximum of 10,000 datafeeds. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get datafeed statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds">Documentation * on elastic.co */ @@ -759,18 +824,20 @@ public CompletableFuture mlDatafeeds() { // ----- Endpoint: cat.ml_jobs /** - * Get anomaly detection jobs. Returns configuration and usage information for - * anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the - * Elasticsearch security features are enabled, you must have - * monitor_ml, monitor, manage_ml, or - * manage cluster privileges to use this API. + * Get anomaly detection jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get anomaly detection job statistics API. + * Get configuration and usage information for anomaly detection jobs. This API + * returns a maximum of 10,000 jobs. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get anomaly detection job statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs">Documentation * on elastic.co */ @@ -782,21 +849,23 @@ public CompletableFuture mlJobs(MlJobsRequest request) { } /** - * Get anomaly detection jobs. Returns configuration and usage information for - * anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the - * Elasticsearch security features are enabled, you must have - * monitor_ml, monitor, manage_ml, or - * manage cluster privileges to use this API. + * Get anomaly detection jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get anomaly detection job statistics API. + * Get configuration and usage information for anomaly detection jobs. This API + * returns a maximum of 10,000 jobs. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get anomaly detection job statistics API. * * @param fn * a function that initializes a builder to create the * {@link MlJobsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs">Documentation * on elastic.co */ @@ -806,18 +875,20 @@ public final CompletableFuture mlJobs( } /** - * Get anomaly detection jobs. Returns configuration and usage information for - * anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the - * Elasticsearch security features are enabled, you must have - * monitor_ml, monitor, manage_ml, or - * manage cluster privileges to use this API. + * Get anomaly detection jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get anomaly detection job statistics API. + * Get configuration and usage information for anomaly detection jobs. This API + * returns a maximum of 10,000 jobs. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get anomaly detection job statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs">Documentation * on elastic.co */ @@ -829,15 +900,16 @@ public CompletableFuture mlJobs() { // ----- Endpoint: cat.ml_trained_models /** - * Get trained models. Returns configuration and usage information about - * inference trained models. + * Get trained models. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get trained models statistics API. + * Get configuration and usage information about inference trained models. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get trained models statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models">Documentation * on elastic.co */ @@ -849,18 +921,19 @@ public CompletableFuture mlTrainedModels(MlTrainedModel } /** - * Get trained models. Returns configuration and usage information about - * inference trained models. + * Get trained models. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get trained models statistics API. + * Get configuration and usage information about inference trained models. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get trained models statistics API. * * @param fn * a function that initializes a builder to create the * {@link MlTrainedModelsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models">Documentation * on elastic.co */ @@ -870,15 +943,16 @@ public final CompletableFuture mlTrainedModels( } /** - * Get trained models. Returns configuration and usage information about - * inference trained models. + * Get trained models. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get trained models statistics API. + * Get configuration and usage information about inference trained models. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get trained models statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models">Documentation * on elastic.co */ @@ -890,13 +964,15 @@ public CompletableFuture mlTrainedModels() { // ----- Endpoint: cat.nodeattrs /** - * Get node attribute information. Get information about custom node attributes. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Get node attribute information. + *

+ * Get information about custom node attributes. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs">Documentation * on elastic.co */ @@ -908,16 +984,18 @@ public CompletableFuture nodeattrs(NodeattrsRequest request) } /** - * Get node attribute information. Get information about custom node attributes. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Get node attribute information. + *

+ * Get information about custom node attributes. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @param fn * a function that initializes a builder to create the * {@link NodeattrsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs">Documentation * on elastic.co */ @@ -927,13 +1005,15 @@ public final CompletableFuture nodeattrs( } /** - * Get node attribute information. Get information about custom node attributes. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Get node attribute information. + *

+ * Get information about custom node attributes. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs">Documentation * on elastic.co */ @@ -945,13 +1025,15 @@ public CompletableFuture nodeattrs() { // ----- Endpoint: cat.nodes /** - * Get node information. Get information about the nodes in a cluster. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Get node information. + *

+ * Get information about the nodes in a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes">Documentation * on elastic.co */ @@ -963,16 +1045,18 @@ public CompletableFuture nodes(NodesRequest request) { } /** - * Get node information. Get information about the nodes in a cluster. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Get node information. + *

+ * Get information about the nodes in a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @param fn * a function that initializes a builder to create the * {@link NodesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes">Documentation * on elastic.co */ @@ -982,13 +1066,15 @@ public final CompletableFuture nodes( } /** - * Get node information. Get information about the nodes in a cluster. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Get node information. + *

+ * Get information about the nodes in a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes">Documentation * on elastic.co */ @@ -1000,14 +1086,15 @@ public CompletableFuture nodes() { // ----- Endpoint: cat.pending_tasks /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Get pending task information. + *

+ * Get information about cluster-level changes that have not yet taken effect. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the pending cluster tasks API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks">Documentation * on elastic.co */ @@ -1019,17 +1106,18 @@ public CompletableFuture pendingTasks(PendingTasksRequest } /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Get pending task information. + *

+ * Get information about cluster-level changes that have not yet taken effect. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the pending cluster tasks API. * * @param fn * a function that initializes a builder to create the * {@link PendingTasksRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks">Documentation * on elastic.co */ @@ -1039,14 +1127,15 @@ public final CompletableFuture pendingTasks( } /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Get pending task information. + *

+ * Get information about cluster-level changes that have not yet taken effect. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the pending cluster tasks API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks">Documentation * on elastic.co */ @@ -1058,13 +1147,15 @@ public CompletableFuture pendingTasks() { // ----- Endpoint: cat.plugins /** - * Get plugin information. Get a list of plugins running on each node of a - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Get plugin information. + *

+ * Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs + * are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins">Documentation * on elastic.co */ @@ -1076,16 +1167,18 @@ public CompletableFuture plugins(PluginsRequest request) { } /** - * Get plugin information. Get a list of plugins running on each node of a - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Get plugin information. + *

+ * Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs + * are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @param fn * a function that initializes a builder to create the * {@link PluginsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins">Documentation * on elastic.co */ @@ -1095,13 +1188,15 @@ public final CompletableFuture plugins( } /** - * Get plugin information. Get a list of plugins running on each node of a - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Get plugin information. + *

+ * Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs + * are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins">Documentation * on elastic.co */ @@ -1113,18 +1208,19 @@ public CompletableFuture plugins() { // ----- Endpoint: cat.recovery /** - * Get shard recovery information. Get information about ongoing and completed - * shard recoveries. Shard recovery is the process of initializing a shard copy, - * such as restoring a primary shard from a snapshot or syncing a replica shard - * from a primary shard. When a shard recovery completes, the recovered shard is - * available for search and indexing. For data streams, the API returns - * information about the stream’s backing indices. IMPORTANT: cat APIs are only - * intended for human consumption using the command line or Kibana console. They - * are not intended for use by applications. For application consumption, use - * the index recovery API. + * Get shard recovery information. + *

+ * Get information about ongoing and completed shard recoveries. Shard recovery + * is the process of initializing a shard copy, such as restoring a primary + * shard from a snapshot or syncing a replica shard from a primary shard. When a + * shard recovery completes, the recovered shard is available for search and + * indexing. For data streams, the API returns information about the stream’s + * backing indices. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the index recovery API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery">Documentation * on elastic.co */ @@ -1136,21 +1232,22 @@ public CompletableFuture recovery(RecoveryRequest request) { } /** - * Get shard recovery information. Get information about ongoing and completed - * shard recoveries. Shard recovery is the process of initializing a shard copy, - * such as restoring a primary shard from a snapshot or syncing a replica shard - * from a primary shard. When a shard recovery completes, the recovered shard is - * available for search and indexing. For data streams, the API returns - * information about the stream’s backing indices. IMPORTANT: cat APIs are only - * intended for human consumption using the command line or Kibana console. They - * are not intended for use by applications. For application consumption, use - * the index recovery API. + * Get shard recovery information. + *

+ * Get information about ongoing and completed shard recoveries. Shard recovery + * is the process of initializing a shard copy, such as restoring a primary + * shard from a snapshot or syncing a replica shard from a primary shard. When a + * shard recovery completes, the recovered shard is available for search and + * indexing. For data streams, the API returns information about the stream’s + * backing indices. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the index recovery API. * * @param fn * a function that initializes a builder to create the * {@link RecoveryRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery">Documentation * on elastic.co */ @@ -1160,18 +1257,19 @@ public final CompletableFuture recovery( } /** - * Get shard recovery information. Get information about ongoing and completed - * shard recoveries. Shard recovery is the process of initializing a shard copy, - * such as restoring a primary shard from a snapshot or syncing a replica shard - * from a primary shard. When a shard recovery completes, the recovered shard is - * available for search and indexing. For data streams, the API returns - * information about the stream’s backing indices. IMPORTANT: cat APIs are only - * intended for human consumption using the command line or Kibana console. They - * are not intended for use by applications. For application consumption, use - * the index recovery API. + * Get shard recovery information. + *

+ * Get information about ongoing and completed shard recoveries. Shard recovery + * is the process of initializing a shard copy, such as restoring a primary + * shard from a snapshot or syncing a replica shard from a primary shard. When a + * shard recovery completes, the recovered shard is available for search and + * indexing. For data streams, the API returns information about the stream’s + * backing indices. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the index recovery API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery">Documentation * on elastic.co */ @@ -1183,14 +1281,15 @@ public CompletableFuture recovery() { // ----- Endpoint: cat.repositories /** - * Get snapshot repository information. Get a list of snapshot repositories for - * a cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the get snapshot repository - * API. + * Get snapshot repository information. + *

+ * Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the get snapshot repository API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories">Documentation * on elastic.co */ @@ -1202,17 +1301,18 @@ public CompletableFuture repositories(RepositoriesRequest } /** - * Get snapshot repository information. Get a list of snapshot repositories for - * a cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the get snapshot repository - * API. + * Get snapshot repository information. + *

+ * Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the get snapshot repository API. * * @param fn * a function that initializes a builder to create the * {@link RepositoriesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories">Documentation * on elastic.co */ @@ -1222,14 +1322,15 @@ public final CompletableFuture repositories( } /** - * Get snapshot repository information. Get a list of snapshot repositories for - * a cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the get snapshot repository - * API. + * Get snapshot repository information. + *

+ * Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the get snapshot repository API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories">Documentation * on elastic.co */ @@ -1241,14 +1342,16 @@ public CompletableFuture repositories() { // ----- Endpoint: cat.segments /** - * Get segment information. Get low-level information about the Lucene segments - * in index shards. For data streams, the API returns information about the - * backing indices. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the index segments API. + * Get segment information. + *

+ * Get low-level information about the Lucene segments in index shards. For data + * streams, the API returns information about the backing indices. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments">Documentation * on elastic.co */ @@ -1260,17 +1363,19 @@ public CompletableFuture segments(SegmentsRequest request) { } /** - * Get segment information. Get low-level information about the Lucene segments - * in index shards. For data streams, the API returns information about the - * backing indices. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the index segments API. + * Get segment information. + *

+ * Get low-level information about the Lucene segments in index shards. For data + * streams, the API returns information about the backing indices. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @param fn * a function that initializes a builder to create the * {@link SegmentsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments">Documentation * on elastic.co */ @@ -1280,14 +1385,16 @@ public final CompletableFuture segments( } /** - * Get segment information. Get low-level information about the Lucene segments - * in index shards. For data streams, the API returns information about the - * backing indices. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the index segments API. + * Get segment information. + *

+ * Get low-level information about the Lucene segments in index shards. For data + * streams, the API returns information about the backing indices. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments">Documentation * on elastic.co */ @@ -1299,13 +1406,15 @@ public CompletableFuture segments() { // ----- Endpoint: cat.shards /** - * Get shard information. Get information about the shards in a cluster. For - * data streams, the API returns information about the backing indices. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. + * Get shard information. + *

+ * Get information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards">Documentation * on elastic.co */ @@ -1317,16 +1426,18 @@ public CompletableFuture shards(ShardsRequest request) { } /** - * Get shard information. Get information about the shards in a cluster. For - * data streams, the API returns information about the backing indices. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. + * Get shard information. + *

+ * Get information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @param fn * a function that initializes a builder to create the * {@link ShardsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards">Documentation * on elastic.co */ @@ -1336,13 +1447,15 @@ public final CompletableFuture shards( } /** - * Get shard information. Get information about the shards in a cluster. For - * data streams, the API returns information about the backing indices. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. + * Get shard information. + *

+ * Get information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards">Documentation * on elastic.co */ @@ -1354,15 +1467,16 @@ public CompletableFuture shards() { // ----- Endpoint: cat.snapshots /** - * Get snapshot information Get information about the snapshots stored in one or - * more repositories. A snapshot is a backup of an index or running - * Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human - * consumption using the command line or Kibana console. They are not intended - * for use by applications. For application consumption, use the get snapshot - * API. + * Get snapshot information. + *

+ * Get information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots">Documentation * on elastic.co */ @@ -1374,18 +1488,19 @@ public CompletableFuture snapshots(SnapshotsRequest request) } /** - * Get snapshot information Get information about the snapshots stored in one or - * more repositories. A snapshot is a backup of an index or running - * Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human - * consumption using the command line or Kibana console. They are not intended - * for use by applications. For application consumption, use the get snapshot - * API. + * Get snapshot information. + *

+ * Get information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. * * @param fn * a function that initializes a builder to create the * {@link SnapshotsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots">Documentation * on elastic.co */ @@ -1395,15 +1510,16 @@ public final CompletableFuture snapshots( } /** - * Get snapshot information Get information about the snapshots stored in one or - * more repositories. A snapshot is a backup of an index or running - * Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human - * consumption using the command line or Kibana console. They are not intended - * for use by applications. For application consumption, use the get snapshot - * API. + * Get snapshot information. + *

+ * Get information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots">Documentation * on elastic.co */ @@ -1415,13 +1531,15 @@ public CompletableFuture snapshots() { // ----- Endpoint: cat.tasks /** - * Get task information. Get information about tasks currently running in the - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the task management API. + * Get task information. + *

+ * Get information about tasks currently running in the cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the task management API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks">Documentation * on elastic.co */ @@ -1433,16 +1551,18 @@ public CompletableFuture tasks(TasksRequest request) { } /** - * Get task information. Get information about tasks currently running in the - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the task management API. + * Get task information. + *

+ * Get information about tasks currently running in the cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the task management API. * * @param fn * a function that initializes a builder to create the * {@link TasksRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks">Documentation * on elastic.co */ @@ -1452,13 +1572,15 @@ public final CompletableFuture tasks( } /** - * Get task information. Get information about tasks currently running in the - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the task management API. + * Get task information. + *

+ * Get information about tasks currently running in the cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the task management API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks">Documentation * on elastic.co */ @@ -1470,15 +1592,16 @@ public CompletableFuture tasks() { // ----- Endpoint: cat.templates /** - * Get index template information. Get information about the index templates in - * a cluster. You can use index templates to apply index settings and field - * mappings to new indices at creation. IMPORTANT: cat APIs are only intended - * for human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the get - * index template API. + * Get index template information. + *

+ * Get information about the index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates">Documentation * on elastic.co */ @@ -1490,18 +1613,19 @@ public CompletableFuture templates(TemplatesRequest request) } /** - * Get index template information. Get information about the index templates in - * a cluster. You can use index templates to apply index settings and field - * mappings to new indices at creation. IMPORTANT: cat APIs are only intended - * for human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the get - * index template API. + * Get index template information. + *

+ * Get information about the index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @param fn * a function that initializes a builder to create the * {@link TemplatesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates">Documentation * on elastic.co */ @@ -1511,15 +1635,16 @@ public final CompletableFuture templates( } /** - * Get index template information. Get information about the index templates in - * a cluster. You can use index templates to apply index settings and field - * mappings to new indices at creation. IMPORTANT: cat APIs are only intended - * for human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the get - * index template API. + * Get index template information. + *

+ * Get information about the index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates">Documentation * on elastic.co */ @@ -1531,14 +1656,16 @@ public CompletableFuture templates() { // ----- Endpoint: cat.thread_pool /** - * Get thread pool statistics. Get thread pool statistics for each node in a - * cluster. Returned information includes all built-in thread pools and custom - * thread pools. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Get thread pool statistics. + *

+ * Get thread pool statistics for each node in a cluster. Returned information + * includes all built-in thread pools and custom thread pools. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool">Documentation * on elastic.co */ @@ -1550,17 +1677,19 @@ public CompletableFuture threadPool(ThreadPoolRequest reques } /** - * Get thread pool statistics. Get thread pool statistics for each node in a - * cluster. Returned information includes all built-in thread pools and custom - * thread pools. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Get thread pool statistics. + *

+ * Get thread pool statistics for each node in a cluster. Returned information + * includes all built-in thread pools and custom thread pools. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @param fn * a function that initializes a builder to create the * {@link ThreadPoolRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool">Documentation * on elastic.co */ @@ -1570,14 +1699,16 @@ public final CompletableFuture threadPool( } /** - * Get thread pool statistics. Get thread pool statistics for each node in a - * cluster. Returned information includes all built-in thread pools and custom - * thread pools. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Get thread pool statistics. + *

+ * Get thread pool statistics for each node in a cluster. Returned information + * includes all built-in thread pools and custom thread pools. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool">Documentation * on elastic.co */ @@ -1589,15 +1720,16 @@ public CompletableFuture threadPool() { // ----- Endpoint: cat.transforms /** - * Get transform information. Get configuration and usage information about - * transforms. + * Get transform information. + *

+ * Get configuration and usage information about transforms. *

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application * consumption, use the get transform statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms">Documentation * on elastic.co */ @@ -1609,8 +1741,9 @@ public CompletableFuture transforms(TransformsRequest reques } /** - * Get transform information. Get configuration and usage information about - * transforms. + * Get transform information. + *

+ * Get configuration and usage information about transforms. *

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application @@ -1620,7 +1753,7 @@ public CompletableFuture transforms(TransformsRequest reques * a function that initializes a builder to create the * {@link TransformsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms">Documentation * on elastic.co */ @@ -1630,15 +1763,16 @@ public final CompletableFuture transforms( } /** - * Get transform information. Get configuration and usage information about - * transforms. + * Get transform information. + *

+ * Get configuration and usage information about transforms. *

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application * consumption, use the get transform statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatClient.java index 05a5baa0e..6a4359464 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatClient.java @@ -68,15 +68,17 @@ public ElasticsearchCatClient withTransportOptions(@Nullable TransportOptions tr // ----- Endpoint: cat.aliases /** - * Get aliases. Retrieves the cluster’s index aliases, including filter and - * routing information. The API does not return data stream aliases. + * Get aliases. *

- * CAT APIs are only intended for human consumption using the command line or - * the Kibana console. They are not intended for use by applications. For - * application consumption, use the aliases API. + * Get the cluster's index aliases, including filter and routing information. + * This API does not return data stream aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or the Kibana console. They are not intended for use by applications. + * For application consumption, use the aliases API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases">Documentation * on elastic.co */ @@ -88,18 +90,20 @@ public AliasesResponse aliases(AliasesRequest request) throws IOException, Elast } /** - * Get aliases. Retrieves the cluster’s index aliases, including filter and - * routing information. The API does not return data stream aliases. + * Get aliases. *

- * CAT APIs are only intended for human consumption using the command line or - * the Kibana console. They are not intended for use by applications. For - * application consumption, use the aliases API. + * Get the cluster's index aliases, including filter and routing information. + * This API does not return data stream aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or the Kibana console. They are not intended for use by applications. + * For application consumption, use the aliases API. * * @param fn * a function that initializes a builder to create the * {@link AliasesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases">Documentation * on elastic.co */ @@ -109,15 +113,17 @@ public final AliasesResponse aliases(Function - * CAT APIs are only intended for human consumption using the command line or - * the Kibana console. They are not intended for use by applications. For - * application consumption, use the aliases API. + * Get the cluster's index aliases, including filter and routing information. + * This API does not return data stream aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or the Kibana console. They are not intended for use by applications. + * For application consumption, use the aliases API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases">Documentation * on elastic.co */ @@ -129,13 +135,16 @@ public AliasesResponse aliases() throws IOException, ElasticsearchException { // ----- Endpoint: cat.allocation /** - * Get shard allocation information. Get a snapshot of the number of shards - * allocated to each data node and their disk space. IMPORTANT: cat APIs are - * only intended for human consumption using the command line or Kibana console. - * They are not intended for use by applications. + * Get shard allocation information. + *

+ * Get a snapshot of the number of shards allocated to each data node and their + * disk space. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation">Documentation * on elastic.co */ @@ -147,16 +156,19 @@ public AllocationResponse allocation(AllocationRequest request) throws IOExcepti } /** - * Get shard allocation information. Get a snapshot of the number of shards - * allocated to each data node and their disk space. IMPORTANT: cat APIs are - * only intended for human consumption using the command line or Kibana console. - * They are not intended for use by applications. + * Get shard allocation information. + *

+ * Get a snapshot of the number of shards allocated to each data node and their + * disk space. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. * * @param fn * a function that initializes a builder to create the * {@link AllocationRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation">Documentation * on elastic.co */ @@ -166,13 +178,16 @@ public final AllocationResponse allocation(Function + * Get a snapshot of the number of shards allocated to each data node and their + * disk space. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation">Documentation * on elastic.co */ @@ -184,16 +199,18 @@ public AllocationResponse allocation() throws IOException, ElasticsearchExceptio // ----- Endpoint: cat.component_templates /** - * Get component templates. Returns information about component templates in a - * cluster. Component templates are building blocks for constructing index - * templates that specify index mappings, settings, and aliases. + * Get component templates. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get information about component templates in a cluster. Component templates + * are building blocks for constructing index templates that specify index + * mappings, settings, and aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the get component template API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates">Documentation * on elastic.co */ @@ -206,19 +223,21 @@ public ComponentTemplatesResponse componentTemplates(ComponentTemplatesRequest r } /** - * Get component templates. Returns information about component templates in a - * cluster. Component templates are building blocks for constructing index - * templates that specify index mappings, settings, and aliases. + * Get component templates. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get information about component templates in a cluster. Component templates + * are building blocks for constructing index templates that specify index + * mappings, settings, and aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the get component template API. * * @param fn * a function that initializes a builder to create the * {@link ComponentTemplatesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates">Documentation * on elastic.co */ @@ -229,16 +248,18 @@ public final ComponentTemplatesResponse componentTemplates( } /** - * Get component templates. Returns information about component templates in a - * cluster. Component templates are building blocks for constructing index - * templates that specify index mappings, settings, and aliases. + * Get component templates. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get information about component templates in a cluster. Component templates + * are building blocks for constructing index templates that specify index + * mappings, settings, and aliases. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the get component template API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates">Documentation * on elastic.co */ @@ -250,17 +271,18 @@ public ComponentTemplatesResponse componentTemplates() throws IOException, Elast // ----- Endpoint: cat.count /** - * Get a document count. Provides quick access to a document count for a data - * stream, an index, or an entire cluster. The document count only includes live - * documents, not deleted documents which have not yet been removed by the merge - * process. + * Get a document count. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get quick access to a document count for a data stream, an index, or an + * entire cluster. The document count only includes live documents, not deleted + * documents which have not yet been removed by the merge process. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the count API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count">Documentation * on elastic.co */ @@ -272,20 +294,21 @@ public CountResponse count(CountRequest request) throws IOException, Elasticsear } /** - * Get a document count. Provides quick access to a document count for a data - * stream, an index, or an entire cluster. The document count only includes live - * documents, not deleted documents which have not yet been removed by the merge - * process. + * Get a document count. *

- * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get quick access to a document count for a data stream, an index, or an + * entire cluster. The document count only includes live documents, not deleted + * documents which have not yet been removed by the merge process. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the count API. * * @param fn * a function that initializes a builder to create the * {@link CountRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count">Documentation * on elastic.co */ @@ -295,17 +318,18 @@ public final CountResponse count(Function - * CAT APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get quick access to a document count for a data stream, an index, or an + * entire cluster. The document count only includes live documents, not deleted + * documents which have not yet been removed by the merge process. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the count API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count">Documentation * on elastic.co */ @@ -317,14 +341,17 @@ public CountResponse count() throws IOException, ElasticsearchException { // ----- Endpoint: cat.fielddata /** - * Get field data cache information. Get the amount of heap memory currently - * used by the field data cache on every data node in the cluster. IMPORTANT: - * cat APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get field data cache information. + *

+ * Get the amount of heap memory currently used by the field data cache on every + * data node in the cluster. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the nodes stats API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata">Documentation * on elastic.co */ @@ -336,17 +363,20 @@ public FielddataResponse fielddata(FielddataRequest request) throws IOException, } /** - * Get field data cache information. Get the amount of heap memory currently - * used by the field data cache on every data node in the cluster. IMPORTANT: - * cat APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get field data cache information. + *

+ * Get the amount of heap memory currently used by the field data cache on every + * data node in the cluster. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the nodes stats API. * * @param fn * a function that initializes a builder to create the * {@link FielddataRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata">Documentation * on elastic.co */ @@ -356,14 +386,17 @@ public final FielddataResponse fielddata(Function + * Get the amount of heap memory currently used by the field data cache on every + * data node in the cluster. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the nodes stats API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata">Documentation * on elastic.co */ @@ -375,21 +408,22 @@ public FielddataResponse fielddata() throws IOException, ElasticsearchException // ----- Endpoint: cat.health /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * cluster health API. This API is often used to check malfunctioning clusters. - * To help you track cluster health alongside log files and alerting systems, - * the API returns timestamps in two formats: HH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Get the cluster health status. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health">Documentation * on elastic.co */ @@ -401,24 +435,25 @@ public HealthResponse health(HealthRequest request) throws IOException, Elastics } /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * cluster health API. This API is often used to check malfunctioning clusters. - * To help you track cluster health alongside log files and alerting systems, - * the API returns timestamps in two formats: HH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Get the cluster health status. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @param fn * a function that initializes a builder to create the * {@link HealthRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health">Documentation * on elastic.co */ @@ -428,21 +463,22 @@ public final HealthResponse health(FunctionHH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Get the cluster health status. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health">Documentation * on elastic.co */ @@ -454,10 +490,12 @@ public HealthResponse health() throws IOException, ElasticsearchException { // ----- Endpoint: cat.help /** - * Get CAT help. Returns help for the CAT APIs. + * Get CAT help. + *

+ * Get help for the CAT APIs. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat">Documentation * on elastic.co */ public HelpResponse help() throws IOException, ElasticsearchException { @@ -467,8 +505,10 @@ public HelpResponse help() throws IOException, ElasticsearchException { // ----- Endpoint: cat.indices /** - * Get index information. Returns high-level information about indices in a - * cluster, including backing indices for data streams. + * Get index information. + *

+ * Get high-level information about indices in a cluster, including backing + * indices for data streams. *

* Use this request to get the following information for each index in a * cluster: @@ -490,7 +530,7 @@ public HelpResponse help() throws IOException, ElasticsearchException { * application consumption, use an index endpoint. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices">Documentation * on elastic.co */ @@ -502,8 +542,10 @@ public IndicesResponse indices(IndicesRequest request) throws IOException, Elast } /** - * Get index information. Returns high-level information about indices in a - * cluster, including backing indices for data streams. + * Get index information. + *

+ * Get high-level information about indices in a cluster, including backing + * indices for data streams. *

* Use this request to get the following information for each index in a * cluster: @@ -528,7 +570,7 @@ public IndicesResponse indices(IndicesRequest request) throws IOException, Elast * a function that initializes a builder to create the * {@link IndicesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices">Documentation * on elastic.co */ @@ -538,8 +580,10 @@ public final IndicesResponse indices(Function + * Get high-level information about indices in a cluster, including backing + * indices for data streams. *

* Use this request to get the following information for each index in a * cluster: @@ -561,7 +605,7 @@ public final IndicesResponse indices(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices">Documentation * on elastic.co */ @@ -573,14 +617,17 @@ public IndicesResponse indices() throws IOException, ElasticsearchException { // ----- Endpoint: cat.master /** - * Get master node information. Get information about the master node, including - * the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the nodes - * info API. + * Get master node information. + *

+ * Get information about the master node, including the ID, bound IP address, + * and name. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master">Documentation * on elastic.co */ @@ -592,17 +639,20 @@ public MasterResponse master(MasterRequest request) throws IOException, Elastics } /** - * Get master node information. Get information about the master node, including - * the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the nodes - * info API. + * Get master node information. + *

+ * Get information about the master node, including the ID, bound IP address, + * and name. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @param fn * a function that initializes a builder to create the * {@link MasterRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master">Documentation * on elastic.co */ @@ -612,14 +662,17 @@ public final MasterResponse master(Function + * Get information about the master node, including the ID, bound IP address, + * and name. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master">Documentation * on elastic.co */ @@ -631,15 +684,17 @@ public MasterResponse master() throws IOException, ElasticsearchException { // ----- Endpoint: cat.ml_data_frame_analytics /** - * Get data frame analytics jobs. Returns configuration and usage information - * about data frame analytics jobs. + * Get data frame analytics jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get data frame analytics jobs statistics API. + * Get configuration and usage information about data frame analytics jobs. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get data frame analytics jobs statistics + * API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics">Documentation * on elastic.co */ @@ -652,18 +707,20 @@ public MlDataFrameAnalyticsResponse mlDataFrameAnalytics(MlDataFrameAnalyticsReq } /** - * Get data frame analytics jobs. Returns configuration and usage information - * about data frame analytics jobs. + * Get data frame analytics jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get data frame analytics jobs statistics API. + * Get configuration and usage information about data frame analytics jobs. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get data frame analytics jobs statistics + * API. * * @param fn * a function that initializes a builder to create the * {@link MlDataFrameAnalyticsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics">Documentation * on elastic.co */ @@ -674,15 +731,17 @@ public final MlDataFrameAnalyticsResponse mlDataFrameAnalytics( } /** - * Get data frame analytics jobs. Returns configuration and usage information - * about data frame analytics jobs. + * Get data frame analytics jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get data frame analytics jobs statistics API. + * Get configuration and usage information about data frame analytics jobs. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get data frame analytics jobs statistics + * API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics">Documentation * on elastic.co */ @@ -694,18 +753,20 @@ public MlDataFrameAnalyticsResponse mlDataFrameAnalytics() throws IOException, E // ----- Endpoint: cat.ml_datafeeds /** - * Get datafeeds. Returns configuration and usage information about datafeeds. - * This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security - * features are enabled, you must have monitor_ml, - * monitor, manage_ml, or manage cluster - * privileges to use this API. + * Get datafeeds. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get datafeed statistics API. + * Get configuration and usage information about datafeeds. This API returns a + * maximum of 10,000 datafeeds. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get datafeed statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds">Documentation * on elastic.co */ @@ -717,21 +778,23 @@ public MlDatafeedsResponse mlDatafeeds(MlDatafeedsRequest request) throws IOExce } /** - * Get datafeeds. Returns configuration and usage information about datafeeds. - * This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security - * features are enabled, you must have monitor_ml, - * monitor, manage_ml, or manage cluster - * privileges to use this API. + * Get datafeeds. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get datafeed statistics API. + * Get configuration and usage information about datafeeds. This API returns a + * maximum of 10,000 datafeeds. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get datafeed statistics API. * * @param fn * a function that initializes a builder to create the * {@link MlDatafeedsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds">Documentation * on elastic.co */ @@ -742,18 +805,20 @@ public final MlDatafeedsResponse mlDatafeeds( } /** - * Get datafeeds. Returns configuration and usage information about datafeeds. - * This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security - * features are enabled, you must have monitor_ml, - * monitor, manage_ml, or manage cluster - * privileges to use this API. + * Get datafeeds. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get datafeed statistics API. + * Get configuration and usage information about datafeeds. This API returns a + * maximum of 10,000 datafeeds. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get datafeed statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds">Documentation * on elastic.co */ @@ -765,18 +830,20 @@ public MlDatafeedsResponse mlDatafeeds() throws IOException, ElasticsearchExcept // ----- Endpoint: cat.ml_jobs /** - * Get anomaly detection jobs. Returns configuration and usage information for - * anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the - * Elasticsearch security features are enabled, you must have - * monitor_ml, monitor, manage_ml, or - * manage cluster privileges to use this API. + * Get anomaly detection jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get anomaly detection job statistics API. + * Get configuration and usage information for anomaly detection jobs. This API + * returns a maximum of 10,000 jobs. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get anomaly detection job statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs">Documentation * on elastic.co */ @@ -788,21 +855,23 @@ public MlJobsResponse mlJobs(MlJobsRequest request) throws IOException, Elastics } /** - * Get anomaly detection jobs. Returns configuration and usage information for - * anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the - * Elasticsearch security features are enabled, you must have - * monitor_ml, monitor, manage_ml, or - * manage cluster privileges to use this API. + * Get anomaly detection jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get anomaly detection job statistics API. + * Get configuration and usage information for anomaly detection jobs. This API + * returns a maximum of 10,000 jobs. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get anomaly detection job statistics API. * * @param fn * a function that initializes a builder to create the * {@link MlJobsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs">Documentation * on elastic.co */ @@ -812,18 +881,20 @@ public final MlJobsResponse mlJobs(Functionmonitor_ml, monitor, manage_ml, or - * manage cluster privileges to use this API. + * Get anomaly detection jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get anomaly detection job statistics API. + * Get configuration and usage information for anomaly detection jobs. This API + * returns a maximum of 10,000 jobs. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get anomaly detection job statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs">Documentation * on elastic.co */ @@ -835,15 +906,16 @@ public MlJobsResponse mlJobs() throws IOException, ElasticsearchException { // ----- Endpoint: cat.ml_trained_models /** - * Get trained models. Returns configuration and usage information about - * inference trained models. + * Get trained models. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get trained models statistics API. + * Get configuration and usage information about inference trained models. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get trained models statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models">Documentation * on elastic.co */ @@ -856,18 +928,19 @@ public MlTrainedModelsResponse mlTrainedModels(MlTrainedModelsRequest request) } /** - * Get trained models. Returns configuration and usage information about - * inference trained models. + * Get trained models. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get trained models statistics API. + * Get configuration and usage information about inference trained models. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get trained models statistics API. * * @param fn * a function that initializes a builder to create the * {@link MlTrainedModelsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models">Documentation * on elastic.co */ @@ -878,15 +951,16 @@ public final MlTrainedModelsResponse mlTrainedModels( } /** - * Get trained models. Returns configuration and usage information about - * inference trained models. + * Get trained models. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get trained models statistics API. + * Get configuration and usage information about inference trained models. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get trained models statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models">Documentation * on elastic.co */ @@ -898,13 +972,15 @@ public MlTrainedModelsResponse mlTrainedModels() throws IOException, Elasticsear // ----- Endpoint: cat.nodeattrs /** - * Get node attribute information. Get information about custom node attributes. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Get node attribute information. + *

+ * Get information about custom node attributes. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs">Documentation * on elastic.co */ @@ -916,16 +992,18 @@ public NodeattrsResponse nodeattrs(NodeattrsRequest request) throws IOException, } /** - * Get node attribute information. Get information about custom node attributes. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Get node attribute information. + *

+ * Get information about custom node attributes. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @param fn * a function that initializes a builder to create the * {@link NodeattrsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs">Documentation * on elastic.co */ @@ -935,13 +1013,15 @@ public final NodeattrsResponse nodeattrs(Function + * Get information about custom node attributes. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs">Documentation * on elastic.co */ @@ -953,13 +1033,15 @@ public NodeattrsResponse nodeattrs() throws IOException, ElasticsearchException // ----- Endpoint: cat.nodes /** - * Get node information. Get information about the nodes in a cluster. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Get node information. + *

+ * Get information about the nodes in a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes">Documentation * on elastic.co */ @@ -971,16 +1053,18 @@ public NodesResponse nodes(NodesRequest request) throws IOException, Elasticsear } /** - * Get node information. Get information about the nodes in a cluster. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Get node information. + *

+ * Get information about the nodes in a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @param fn * a function that initializes a builder to create the * {@link NodesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes">Documentation * on elastic.co */ @@ -990,13 +1074,15 @@ public final NodesResponse nodes(Function + * Get information about the nodes in a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes">Documentation * on elastic.co */ @@ -1008,14 +1094,15 @@ public NodesResponse nodes() throws IOException, ElasticsearchException { // ----- Endpoint: cat.pending_tasks /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Get pending task information. + *

+ * Get information about cluster-level changes that have not yet taken effect. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the pending cluster tasks API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks">Documentation * on elastic.co */ @@ -1027,17 +1114,18 @@ public PendingTasksResponse pendingTasks(PendingTasksRequest request) throws IOE } /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Get pending task information. + *

+ * Get information about cluster-level changes that have not yet taken effect. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the pending cluster tasks API. * * @param fn * a function that initializes a builder to create the * {@link PendingTasksRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks">Documentation * on elastic.co */ @@ -1048,14 +1136,15 @@ public final PendingTasksResponse pendingTasks( } /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Get pending task information. + *

+ * Get information about cluster-level changes that have not yet taken effect. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the pending cluster tasks API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks">Documentation * on elastic.co */ @@ -1067,13 +1156,15 @@ public PendingTasksResponse pendingTasks() throws IOException, ElasticsearchExce // ----- Endpoint: cat.plugins /** - * Get plugin information. Get a list of plugins running on each node of a - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Get plugin information. + *

+ * Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs + * are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins">Documentation * on elastic.co */ @@ -1085,16 +1176,18 @@ public PluginsResponse plugins(PluginsRequest request) throws IOException, Elast } /** - * Get plugin information. Get a list of plugins running on each node of a - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Get plugin information. + *

+ * Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs + * are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @param fn * a function that initializes a builder to create the * {@link PluginsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins">Documentation * on elastic.co */ @@ -1104,13 +1197,15 @@ public final PluginsResponse plugins(Function + * Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs + * are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins">Documentation * on elastic.co */ @@ -1122,18 +1217,19 @@ public PluginsResponse plugins() throws IOException, ElasticsearchException { // ----- Endpoint: cat.recovery /** - * Get shard recovery information. Get information about ongoing and completed - * shard recoveries. Shard recovery is the process of initializing a shard copy, - * such as restoring a primary shard from a snapshot or syncing a replica shard - * from a primary shard. When a shard recovery completes, the recovered shard is - * available for search and indexing. For data streams, the API returns - * information about the stream’s backing indices. IMPORTANT: cat APIs are only - * intended for human consumption using the command line or Kibana console. They - * are not intended for use by applications. For application consumption, use - * the index recovery API. + * Get shard recovery information. + *

+ * Get information about ongoing and completed shard recoveries. Shard recovery + * is the process of initializing a shard copy, such as restoring a primary + * shard from a snapshot or syncing a replica shard from a primary shard. When a + * shard recovery completes, the recovered shard is available for search and + * indexing. For data streams, the API returns information about the stream’s + * backing indices. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the index recovery API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery">Documentation * on elastic.co */ @@ -1145,21 +1241,22 @@ public RecoveryResponse recovery(RecoveryRequest request) throws IOException, El } /** - * Get shard recovery information. Get information about ongoing and completed - * shard recoveries. Shard recovery is the process of initializing a shard copy, - * such as restoring a primary shard from a snapshot or syncing a replica shard - * from a primary shard. When a shard recovery completes, the recovered shard is - * available for search and indexing. For data streams, the API returns - * information about the stream’s backing indices. IMPORTANT: cat APIs are only - * intended for human consumption using the command line or Kibana console. They - * are not intended for use by applications. For application consumption, use - * the index recovery API. + * Get shard recovery information. + *

+ * Get information about ongoing and completed shard recoveries. Shard recovery + * is the process of initializing a shard copy, such as restoring a primary + * shard from a snapshot or syncing a replica shard from a primary shard. When a + * shard recovery completes, the recovered shard is available for search and + * indexing. For data streams, the API returns information about the stream’s + * backing indices. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the index recovery API. * * @param fn * a function that initializes a builder to create the * {@link RecoveryRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery">Documentation * on elastic.co */ @@ -1169,18 +1266,19 @@ public final RecoveryResponse recovery(Function + * Get information about ongoing and completed shard recoveries. Shard recovery + * is the process of initializing a shard copy, such as restoring a primary + * shard from a snapshot or syncing a replica shard from a primary shard. When a + * shard recovery completes, the recovered shard is available for search and + * indexing. For data streams, the API returns information about the stream’s + * backing indices. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the index recovery API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery">Documentation * on elastic.co */ @@ -1192,14 +1290,15 @@ public RecoveryResponse recovery() throws IOException, ElasticsearchException { // ----- Endpoint: cat.repositories /** - * Get snapshot repository information. Get a list of snapshot repositories for - * a cluster. 
IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the get snapshot repository - * API. + * Get snapshot repository information. + *

+ * Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the get snapshot repository API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories">Documentation * on elastic.co */ @@ -1211,17 +1310,18 @@ public RepositoriesResponse repositories(RepositoriesRequest request) throws IOE } /** - * Get snapshot repository information. Get a list of snapshot repositories for - * a cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the get snapshot repository - * API. + * Get snapshot repository information. + *

+ * Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the get snapshot repository API. * * @param fn * a function that initializes a builder to create the * {@link RepositoriesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories">Documentation * on elastic.co */ @@ -1232,14 +1332,15 @@ public final RepositoriesResponse repositories( } /** - * Get snapshot repository information. Get a list of snapshot repositories for - * a cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the get snapshot repository - * API. + * Get snapshot repository information. + *

+ * Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the get snapshot repository API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories">Documentation * on elastic.co */ @@ -1251,14 +1352,16 @@ public RepositoriesResponse repositories() throws IOException, ElasticsearchExce // ----- Endpoint: cat.segments /** - * Get segment information. Get low-level information about the Lucene segments - * in index shards. For data streams, the API returns information about the - * backing indices. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the index segments API. + * Get segment information. + *

+ * Get low-level information about the Lucene segments in index shards. For data + * streams, the API returns information about the backing indices. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments">Documentation * on elastic.co */ @@ -1270,17 +1373,19 @@ public SegmentsResponse segments(SegmentsRequest request) throws IOException, El } /** - * Get segment information. Get low-level information about the Lucene segments - * in index shards. For data streams, the API returns information about the - * backing indices. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the index segments API. + * Get segment information. + *

+ * Get low-level information about the Lucene segments in index shards. For data + * streams, the API returns information about the backing indices. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @param fn * a function that initializes a builder to create the * {@link SegmentsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments">Documentation * on elastic.co */ @@ -1290,14 +1395,16 @@ public final SegmentsResponse segments(Function + * Get low-level information about the Lucene segments in index shards. For data + * streams, the API returns information about the backing indices. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments">Documentation * on elastic.co */ @@ -1309,13 +1416,15 @@ public SegmentsResponse segments() throws IOException, ElasticsearchException { // ----- Endpoint: cat.shards /** - * Get shard information. Get information about the shards in a cluster. For - * data streams, the API returns information about the backing indices. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. + * Get shard information. + *

+ * Get information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards">Documentation * on elastic.co */ @@ -1327,16 +1436,18 @@ public ShardsResponse shards(ShardsRequest request) throws IOException, Elastics } /** - * Get shard information. Get information about the shards in a cluster. For - * data streams, the API returns information about the backing indices. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. + * Get shard information. + *

+ * Get information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @param fn * a function that initializes a builder to create the * {@link ShardsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards">Documentation * on elastic.co */ @@ -1346,13 +1457,15 @@ public final ShardsResponse shards(Function + * Get information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards">Documentation * on elastic.co */ @@ -1364,15 +1477,16 @@ public ShardsResponse shards() throws IOException, ElasticsearchException { // ----- Endpoint: cat.snapshots /** - * Get snapshot information Get information about the snapshots stored in one or - * more repositories. A snapshot is a backup of an index or running - * Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human - * consumption using the command line or Kibana console. They are not intended - * for use by applications. For application consumption, use the get snapshot - * API. + * Get snapshot information. + *

+ * Get information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots">Documentation * on elastic.co */ @@ -1384,18 +1498,19 @@ public SnapshotsResponse snapshots(SnapshotsRequest request) throws IOException, } /** - * Get snapshot information Get information about the snapshots stored in one or - * more repositories. A snapshot is a backup of an index or running - * Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human - * consumption using the command line or Kibana console. They are not intended - * for use by applications. For application consumption, use the get snapshot - * API. + * Get snapshot information. + *

+ * Get information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. * * @param fn * a function that initializes a builder to create the * {@link SnapshotsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots">Documentation * on elastic.co */ @@ -1405,15 +1520,16 @@ public final SnapshotsResponse snapshots(Function + * Get information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots">Documentation * on elastic.co */ @@ -1425,13 +1541,15 @@ public SnapshotsResponse snapshots() throws IOException, ElasticsearchException // ----- Endpoint: cat.tasks /** - * Get task information. Get information about tasks currently running in the - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the task management API. + * Get task information. + *

+ * Get information about tasks currently running in the cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the task management API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks">Documentation * on elastic.co */ @@ -1443,16 +1561,18 @@ public TasksResponse tasks(TasksRequest request) throws IOException, Elasticsear } /** - * Get task information. Get information about tasks currently running in the - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the task management API. + * Get task information. + *

+ * Get information about tasks currently running in the cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the task management API. * * @param fn * a function that initializes a builder to create the * {@link TasksRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks">Documentation * on elastic.co */ @@ -1462,13 +1582,15 @@ public final TasksResponse tasks(Function + * Get information about tasks currently running in the cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the task management API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks">Documentation * on elastic.co */ @@ -1480,15 +1602,16 @@ public TasksResponse tasks() throws IOException, ElasticsearchException { // ----- Endpoint: cat.templates /** - * Get index template information. Get information about the index templates in - * a cluster. You can use index templates to apply index settings and field - * mappings to new indices at creation. IMPORTANT: cat APIs are only intended - * for human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the get - * index template API. + * Get index template information. + *

+ * Get information about the index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates">Documentation * on elastic.co */ @@ -1500,18 +1623,19 @@ public TemplatesResponse templates(TemplatesRequest request) throws IOException, } /** - * Get index template information. Get information about the index templates in - * a cluster. You can use index templates to apply index settings and field - * mappings to new indices at creation. IMPORTANT: cat APIs are only intended - * for human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the get - * index template API. + * Get index template information. + *

+ * Get information about the index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @param fn * a function that initializes a builder to create the * {@link TemplatesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates">Documentation * on elastic.co */ @@ -1521,15 +1645,16 @@ public final TemplatesResponse templates(Function + * Get information about the index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates">Documentation * on elastic.co */ @@ -1541,14 +1666,16 @@ public TemplatesResponse templates() throws IOException, ElasticsearchException // ----- Endpoint: cat.thread_pool /** - * Get thread pool statistics. Get thread pool statistics for each node in a - * cluster. Returned information includes all built-in thread pools and custom - * thread pools. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Get thread pool statistics. + *

+ * Get thread pool statistics for each node in a cluster. Returned information + * includes all built-in thread pools and custom thread pools. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool">Documentation * on elastic.co */ @@ -1560,17 +1687,19 @@ public ThreadPoolResponse threadPool(ThreadPoolRequest request) throws IOExcepti } /** - * Get thread pool statistics. Get thread pool statistics for each node in a - * cluster. Returned information includes all built-in thread pools and custom - * thread pools. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Get thread pool statistics. + *

+ * Get thread pool statistics for each node in a cluster. Returned information + * includes all built-in thread pools and custom thread pools. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @param fn * a function that initializes a builder to create the * {@link ThreadPoolRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool">Documentation * on elastic.co */ @@ -1580,14 +1709,16 @@ public final ThreadPoolResponse threadPool(Function + * Get thread pool statistics for each node in a cluster. Returned information + * includes all built-in thread pools and custom thread pools. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool">Documentation * on elastic.co */ @@ -1599,15 +1730,16 @@ public ThreadPoolResponse threadPool() throws IOException, ElasticsearchExceptio // ----- Endpoint: cat.transforms /** - * Get transform information. Get configuration and usage information about - * transforms. + * Get transform information. + *

+ * Get configuration and usage information about transforms. *

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application * consumption, use the get transform statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms">Documentation * on elastic.co */ @@ -1619,8 +1751,9 @@ public TransformsResponse transforms(TransformsRequest request) throws IOExcepti } /** - * Get transform information. Get configuration and usage information about - * transforms. + * Get transform information. + *

+ * Get configuration and usage information about transforms. *

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application @@ -1630,7 +1763,7 @@ public TransformsResponse transforms(TransformsRequest request) throws IOExcepti * a function that initializes a builder to create the * {@link TransformsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms">Documentation * on elastic.co */ @@ -1640,15 +1773,16 @@ public final TransformsResponse transforms(Function + * Get configuration and usage information about transforms. *

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application * consumption, use the get transform statistics API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/FielddataRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/FielddataRequest.java index 7cbd6677f..c35250c8b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/FielddataRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/FielddataRequest.java @@ -57,10 +57,13 @@ // typedef: cat.fielddata.Request /** - * Get field data cache information. Get the amount of heap memory currently - * used by the field data cache on every data node in the cluster. IMPORTANT: - * cat APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For + * Get field data cache information. + *

+ * Get the amount of heap memory currently used by the field data cache on every + * data node in the cluster. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For * application consumption, use the nodes stats API. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HealthRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HealthRequest.java index a5849749a..f1358a834 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HealthRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HealthRequest.java @@ -55,18 +55,19 @@ // typedef: cat.health.Request /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * cluster health API. This API is often used to check malfunctioning clusters. - * To help you track cluster health alongside log files and alerting systems, - * the API returns timestamps in two formats: HH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Get the cluster health status. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HelpRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HelpRequest.java index 78d0d996a..e72870dc6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HelpRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HelpRequest.java @@ -49,7 +49,9 @@ // typedef: cat.help.Request /** - * Get CAT help. Returns help for the CAT APIs. + * Get CAT help. + *

+ * Get help for the CAT APIs. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/IndicesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/IndicesRequest.java index 794b548b5..ec163fdf9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/IndicesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/IndicesRequest.java @@ -62,8 +62,10 @@ // typedef: cat.indices.Request /** - * Get index information. Returns high-level information about indices in a - * cluster, including backing indices for data streams. + * Get index information. + *

+ * Get high-level information about indices in a cluster, including backing + * indices for data streams. *

* Use this request to get the following information for each index in a * cluster: diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MasterRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MasterRequest.java index 52e9dfd52..829054cdb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MasterRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MasterRequest.java @@ -55,11 +55,14 @@ // typedef: cat.master.Request /** - * Get master node information. Get information about the master node, including - * the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the nodes - * info API. + * Get master node information. + *

+ * Get information about the master node, including the ID, bound IP address, + * and name. + *

+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlDataFrameAnalyticsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlDataFrameAnalyticsRequest.java index 2457f9c21..53fb07c37 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlDataFrameAnalyticsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlDataFrameAnalyticsRequest.java @@ -59,12 +59,14 @@ // typedef: cat.ml_data_frame_analytics.Request /** - * Get data frame analytics jobs. Returns configuration and usage information - * about data frame analytics jobs. + * Get data frame analytics jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get data frame analytics jobs statistics API. + * Get configuration and usage information about data frame analytics jobs. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get data frame analytics jobs statistics + * API. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlDatafeedsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlDatafeedsRequest.java index c8972a2aa..09181a883 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlDatafeedsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlDatafeedsRequest.java @@ -58,15 +58,17 @@ // typedef: cat.ml_datafeeds.Request /** - * Get datafeeds. Returns configuration and usage information about datafeeds. - * This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security - * features are enabled, you must have monitor_ml, - * monitor, manage_ml, or manage cluster - * privileges to use this API. + * Get datafeeds. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get datafeed statistics API. + * Get configuration and usage information about datafeeds. This API returns a + * maximum of 10,000 datafeeds. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get datafeed statistics API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlJobsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlJobsRequest.java index d217feb63..e390cf4e6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlJobsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlJobsRequest.java @@ -59,15 +59,17 @@ // typedef: cat.ml_jobs.Request /** - * Get anomaly detection jobs. Returns configuration and usage information for - * anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the - * Elasticsearch security features are enabled, you must have - * monitor_ml, monitor, manage_ml, or - * manage cluster privileges to use this API. + * Get anomaly detection jobs. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get anomaly detection job statistics API. + * Get configuration and usage information for anomaly detection jobs. This API + * returns a maximum of 10,000 jobs. If the Elasticsearch security features are + * enabled, you must have monitor_ml, monitor, + * manage_ml, or manage cluster privileges to use this + * API. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get anomaly detection job statistics API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlTrainedModelsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlTrainedModelsRequest.java index b5beb9f66..e2863bcd3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlTrainedModelsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlTrainedModelsRequest.java @@ -60,12 +60,13 @@ // typedef: cat.ml_trained_models.Request /** - * Get trained models. Returns configuration and usage information about - * inference trained models. + * Get trained models. *

- * CAT APIs are only intended for human consumption using the Kibana console or - * command line. They are not intended for use by applications. For application - * consumption, use the get trained models statistics API. + * Get configuration and usage information about inference trained models. + *

+ * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + * console or command line. They are not intended for use by applications. For + * application consumption, use the get trained models statistics API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodeattrsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodeattrsRequest.java index 4c9ffb766..4b811754e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodeattrsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodeattrsRequest.java @@ -55,10 +55,12 @@ // typedef: cat.nodeattrs.Request /** - * Get node attribute information. Get information about custom node attributes. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Get node attribute information. + *

+ * Get information about custom node attributes. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodesRequest.java index 2c8868664..751cc5d97 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodesRequest.java @@ -57,10 +57,12 @@ // typedef: cat.nodes.Request /** - * Get node information. Get information about the nodes in a cluster. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Get node information. + *

+ * Get information about the nodes in a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the nodes info API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PendingTasksRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PendingTasksRequest.java index 59c25c500..ea1470974 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PendingTasksRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PendingTasksRequest.java @@ -56,11 +56,12 @@ // typedef: cat.pending_tasks.Request /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Get pending task information. + *

+ * Get information about cluster-level changes that have not yet taken effect. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the pending cluster tasks API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PluginsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PluginsRequest.java index 7f2237687..e07574af5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PluginsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PluginsRequest.java @@ -55,10 +55,12 @@ // typedef: cat.plugins.Request /** - * Get plugin information. Get a list of plugins running on each node of a - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Get plugin information. + *

+ * Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs + * are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RecoveryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RecoveryRequest.java index 10bd684f7..5b56e9e76 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RecoveryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RecoveryRequest.java @@ -59,15 +59,16 @@ // typedef: cat.recovery.Request /** - * Get shard recovery information. Get information about ongoing and completed - * shard recoveries. Shard recovery is the process of initializing a shard copy, - * such as restoring a primary shard from a snapshot or syncing a replica shard - * from a primary shard. When a shard recovery completes, the recovered shard is - * available for search and indexing. For data streams, the API returns - * information about the stream’s backing indices. IMPORTANT: cat APIs are only - * intended for human consumption using the command line or Kibana console. They - * are not intended for use by applications. For application consumption, use - * the index recovery API. + * Get shard recovery information. + *

+ * Get information about ongoing and completed shard recoveries. Shard recovery + * is the process of initializing a shard copy, such as restoring a primary + * shard from a snapshot or syncing a replica shard from a primary shard. When a + * shard recovery completes, the recovered shard is available for search and + * indexing. For data streams, the API returns information about the stream’s + * backing indices. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the index recovery API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RepositoriesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RepositoriesRequest.java index b7e42b8eb..1e9a07677 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RepositoriesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RepositoriesRequest.java @@ -55,11 +55,12 @@ // typedef: cat.repositories.Request /** - * Get snapshot repository information. Get a list of snapshot repositories for - * a cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the get snapshot repository - * API. + * Get snapshot repository information. + *

+ * Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the get snapshot repository API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SegmentsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SegmentsRequest.java index 9d8a1ac78..a4c1887a9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SegmentsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SegmentsRequest.java @@ -59,11 +59,13 @@ // typedef: cat.segments.Request /** - * Get segment information. Get low-level information about the Lucene segments - * in index shards. For data streams, the API returns information about the - * backing indices. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the index segments API. + * Get segment information. + *

+ * Get low-level information about the Lucene segments in index shards. For data + * streams, the API returns information about the backing indices. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ShardsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ShardsRequest.java index 87f0e5e67..bf31b0679 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ShardsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ShardsRequest.java @@ -59,10 +59,12 @@ // typedef: cat.shards.Request /** - * Get shard information. Get information about the shards in a cluster. For - * data streams, the API returns information about the backing indices. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. + * Get shard information. + *

+ * Get information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SnapshotsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SnapshotsRequest.java index e2b9663c3..aebf1bcd7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SnapshotsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SnapshotsRequest.java @@ -59,12 +59,13 @@ // typedef: cat.snapshots.Request /** - * Get snapshot information Get information about the snapshots stored in one or - * more repositories. A snapshot is a backup of an index or running - * Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human - * consumption using the command line or Kibana console. They are not intended - * for use by applications. For application consumption, use the get snapshot - * API. + * Get snapshot information. + *

+ * Get information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TasksRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TasksRequest.java index 468e1d00f..23a5157e3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TasksRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TasksRequest.java @@ -60,10 +60,12 @@ // typedef: cat.tasks.Request /** - * Get task information. Get information about tasks currently running in the - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the task management API. + * Get task information. + *

+ * Get information about tasks currently running in the cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the task management API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TemplatesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TemplatesRequest.java index bd9e46820..ef5c79a54 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TemplatesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TemplatesRequest.java @@ -55,12 +55,13 @@ // typedef: cat.templates.Request /** - * Get index template information. Get information about the index templates in - * a cluster. You can use index templates to apply index settings and field - * mappings to new indices at creation. IMPORTANT: cat APIs are only intended - * for human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the get - * index template API. + * Get index template information. + *

+ * Get information about the index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ThreadPoolRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ThreadPoolRequest.java index 9105bd8db..3c81ba35f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ThreadPoolRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ThreadPoolRequest.java @@ -59,11 +59,13 @@ // typedef: cat.thread_pool.Request /** - * Get thread pool statistics. Get thread pool statistics for each node in a - * cluster. Returned information includes all built-in thread pools and custom - * thread pools. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Get thread pool statistics. + *

+ * Get thread pool statistics for each node in a cluster. Returned information + * includes all built-in thread pools and custom thread pools. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TransformsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TransformsRequest.java index c8a3bd62f..20909e0e3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TransformsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TransformsRequest.java @@ -59,8 +59,9 @@ // typedef: cat.transforms.Request /** - * Get transform information. Get configuration and usage information about - * transforms. + * Get transform information. + *

+ * Get configuration and usage information about transforms. *

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/component_templates/ComponentTemplate.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/component_templates/ComponentTemplate.java index 153c8374d..d4fb4db72 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/component_templates/ComponentTemplate.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/component_templates/ComponentTemplate.java @@ -62,6 +62,7 @@ public class ComponentTemplate implements JsonpSerializable { private final String name; + @Nullable private final String version; private final String aliasCount; @@ -79,7 +80,7 @@ public class ComponentTemplate implements JsonpSerializable { private ComponentTemplate(Builder builder) { this.name = ApiTypeHelper.requireNonNull(builder.name, this, "name"); - this.version = ApiTypeHelper.requireNonNull(builder.version, this, "version"); + this.version = builder.version; this.aliasCount = ApiTypeHelper.requireNonNull(builder.aliasCount, this, "aliasCount"); this.mappingCount = ApiTypeHelper.requireNonNull(builder.mappingCount, this, "mappingCount"); this.settingsCount = ApiTypeHelper.requireNonNull(builder.settingsCount, this, "settingsCount"); @@ -100,8 +101,9 @@ public final String name() { } /** - * Required - API name: {@code version} + * API name: {@code version} */ + @Nullable public final String version() { return this.version; } @@ -155,9 +157,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("name"); generator.write(this.name); - generator.writeKey("version"); - generator.write(this.version); + if (this.version != null) { + generator.writeKey("version"); + generator.write(this.version); + } generator.writeKey("alias_count"); 
generator.write(this.aliasCount); @@ -189,6 +193,7 @@ public String toString() { public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { private String name; + @Nullable private String version; private String aliasCount; @@ -210,9 +215,9 @@ public final Builder name(String value) { } /** - * Required - API name: {@code version} + * API name: {@code version} */ - public final Builder version(String value) { + public final Builder version(@Nullable String value) { this.version = value; return this; } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrAsyncClient.java index 6fabe3f48..1082094dd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrAsyncClient.java @@ -71,7 +71,7 @@ public ElasticsearchCcrAsyncClient withTransportOptions(@Nullable TransportOptio * auto-follow patterns. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern">Documentation * on elastic.co */ @@ -91,7 +91,7 @@ public CompletableFuture deleteAutoFollowPatter * a function that initializes a builder to create the * {@link DeleteAutoFollowPatternRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern">Documentation * on elastic.co */ @@ -109,7 +109,7 @@ public final CompletableFuture deleteAutoFollow * leader index to the follower index. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow">Documentation * on elastic.co */ @@ -130,7 +130,7 @@ public CompletableFuture follow(FollowRequest request) { * a function that initializes a builder to create the * {@link FollowRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow">Documentation * on elastic.co */ @@ -148,7 +148,7 @@ public final CompletableFuture follow( * active or paused. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info">Documentation * on elastic.co */ @@ -169,7 +169,7 @@ public CompletableFuture followInfo(FollowInfoRequest reques * a function that initializes a builder to create the * {@link FollowInfoRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info">Documentation * on elastic.co */ @@ -186,7 +186,7 @@ public final CompletableFuture followInfo( * with each shard for the specified indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats">Documentation * on elastic.co */ @@ -206,7 +206,7 @@ public CompletableFuture followStats(FollowStatsRequest req * a function that initializes a builder to create the * {@link FollowStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats">Documentation * on elastic.co */ @@ -241,7 +241,7 @@ public final CompletableFuture followStats( * retention leases after the unfollow API is invoked. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower">Documentation * on elastic.co */ @@ -279,7 +279,7 @@ public CompletableFuture forgetFollower(ForgetFollowerRe * a function that initializes a builder to create the * {@link ForgetFollowerRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower">Documentation * on elastic.co */ @@ -294,7 +294,7 @@ public final CompletableFuture forgetFollower( * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1">Documentation * on elastic.co */ @@ -312,7 +312,7 @@ public CompletableFuture getAutoFollowPattern(GetA * a function that initializes a builder to create the * {@link GetAutoFollowPatternRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1">Documentation * on elastic.co */ @@ -325,7 +325,7 @@ public final CompletableFuture getAutoFollowPatter * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1">Documentation * on elastic.co */ @@ -350,7 +350,7 @@ public CompletableFuture getAutoFollowPattern() { * the interim. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern">Documentation * on elastic.co */ @@ -379,7 +379,7 @@ public CompletableFuture pauseAutoFollowPattern( * a function that initializes a builder to create the * {@link PauseAutoFollowPatternRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern">Documentation * on elastic.co */ @@ -398,7 +398,7 @@ public final CompletableFuture pauseAutoFollowPa * task. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow">Documentation * on elastic.co */ @@ -420,7 +420,7 @@ public CompletableFuture pauseFollow(PauseFollowRequest req * a function that initializes a builder to create the * {@link PauseFollowRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow">Documentation * on elastic.co */ @@ -445,7 +445,7 @@ public final CompletableFuture pauseFollow( * patterns. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern">Documentation * on elastic.co */ @@ -473,7 +473,7 @@ public CompletableFuture putAutoFollowPattern(PutA * a function that initializes a builder to create the * {@link PutAutoFollowPatternRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern">Documentation * on elastic.co */ @@ -492,7 +492,7 @@ public final CompletableFuture putAutoFollowPatter * be followed unless they have been deleted or closed in the interim. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern">Documentation * on elastic.co */ @@ -515,7 +515,7 @@ public CompletableFuture resumeAutoFollowPatter * a function that initializes a builder to create the * {@link ResumeAutoFollowPatternRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern">Documentation * on elastic.co */ @@ -534,7 +534,7 @@ public final CompletableFuture resumeAutoFollow * follower index will resume fetching operations from the leader index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow">Documentation * on elastic.co */ @@ -556,7 +556,7 @@ public CompletableFuture resumeFollow(ResumeFollowRequest * a function that initializes a builder to create the * {@link ResumeFollowRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow">Documentation * on elastic.co */ @@ -572,7 +572,7 @@ public final CompletableFuture resumeFollow( * auto-following and the same shard-level stats as the get follower stats API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats">Documentation * on elastic.co */ @@ -591,7 +591,7 @@ public CompletableFuture stats(CcrStatsRequest request) { * a function that initializes a builder to create the * {@link CcrStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats">Documentation * on elastic.co */ @@ -605,7 +605,7 @@ public final CompletableFuture stats( * auto-following and the same shard-level stats as the get follower stats API. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats">Documentation * on elastic.co */ @@ -628,7 +628,7 @@ public CompletableFuture stats() { * regular index is an irreversible operation. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow">Documentation * on elastic.co */ @@ -654,7 +654,7 @@ public CompletableFuture unfollow(UnfollowRequest request) { * a function that initializes a builder to create the * {@link UnfollowRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrClient.java index 64a34ffff..c512c67c9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrClient.java @@ -72,7 +72,7 @@ public ElasticsearchCcrClient withTransportOptions(@Nullable TransportOptions tr * auto-follow patterns. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern">Documentation * on elastic.co */ @@ -92,7 +92,7 @@ public DeleteAutoFollowPatternResponse deleteAutoFollowPattern(DeleteAutoFollowP * a function that initializes a builder to create the * {@link DeleteAutoFollowPatternRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern">Documentation * on elastic.co */ @@ -111,7 +111,7 @@ public final DeleteAutoFollowPatternResponse deleteAutoFollowPattern( * leader index to the follower index. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow">Documentation * on elastic.co */ @@ -132,7 +132,7 @@ public FollowResponse follow(FollowRequest request) throws IOException, Elastics * a function that initializes a builder to create the * {@link FollowRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow">Documentation * on elastic.co */ @@ -150,7 +150,7 @@ public final FollowResponse follow(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info">Documentation * on elastic.co */ @@ -171,7 +171,7 @@ public FollowInfoResponse followInfo(FollowInfoRequest request) throws IOExcepti * a function that initializes a builder to create the * {@link FollowInfoRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info">Documentation * on elastic.co */ @@ -188,7 +188,7 @@ public final FollowInfoResponse followInfo(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats">Documentation * on elastic.co */ @@ -208,7 +208,7 @@ public FollowStatsResponse followStats(FollowStatsRequest request) throws IOExce * a function that initializes a builder to create the * {@link FollowStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats">Documentation * on elastic.co */ @@ -244,7 +244,7 @@ public final FollowStatsResponse followStats( * retention leases after the unfollow API is invoked. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower">Documentation * on elastic.co */ @@ -283,7 +283,7 @@ public ForgetFollowerResponse forgetFollower(ForgetFollowerRequest request) * a function that initializes a builder to create the * {@link ForgetFollowerRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower">Documentation * on elastic.co */ @@ -299,7 +299,7 @@ public final ForgetFollowerResponse forgetFollower( * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1">Documentation * on elastic.co */ @@ -318,7 +318,7 @@ public GetAutoFollowPatternResponse getAutoFollowPattern(GetAutoFollowPatternReq * a function that initializes a builder to create the * {@link GetAutoFollowPatternRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1">Documentation * on elastic.co */ @@ -332,7 +332,7 @@ public final GetAutoFollowPatternResponse getAutoFollowPattern( * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1">Documentation * on elastic.co */ @@ -357,7 +357,7 @@ public GetAutoFollowPatternResponse getAutoFollowPattern() throws IOException, E * the interim. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern">Documentation * on elastic.co */ @@ -386,7 +386,7 @@ public PauseAutoFollowPatternResponse pauseAutoFollowPattern(PauseAutoFollowPatt * a function that initializes a builder to create the * {@link PauseAutoFollowPatternRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern">Documentation * on elastic.co */ @@ -406,7 +406,7 @@ public final PauseAutoFollowPatternResponse pauseAutoFollowPattern( * task. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow">Documentation * on elastic.co */ @@ -428,7 +428,7 @@ public PauseFollowResponse pauseFollow(PauseFollowRequest request) throws IOExce * a function that initializes a builder to create the * {@link PauseFollowRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow">Documentation * on elastic.co */ @@ -454,7 +454,7 @@ public final PauseFollowResponse pauseFollow( * patterns. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern">Documentation * on elastic.co */ @@ -483,7 +483,7 @@ public PutAutoFollowPatternResponse putAutoFollowPattern(PutAutoFollowPatternReq * a function that initializes a builder to create the * {@link PutAutoFollowPatternRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern">Documentation * on elastic.co */ @@ -503,7 +503,7 @@ public final PutAutoFollowPatternResponse putAutoFollowPattern( * be followed unless they have been deleted or closed in the interim. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern">Documentation * on elastic.co */ @@ -526,7 +526,7 @@ public ResumeAutoFollowPatternResponse resumeAutoFollowPattern(ResumeAutoFollowP * a function that initializes a builder to create the * {@link ResumeAutoFollowPatternRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern">Documentation * on elastic.co */ @@ -546,7 +546,7 @@ public final ResumeAutoFollowPatternResponse resumeAutoFollowPattern( * follower index will resume fetching operations from the leader index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow">Documentation * on elastic.co */ @@ -568,7 +568,7 @@ public ResumeFollowResponse resumeFollow(ResumeFollowRequest request) throws IOE * a function that initializes a builder to create the * {@link ResumeFollowRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow">Documentation * on elastic.co */ @@ -585,7 +585,7 @@ public final ResumeFollowResponse resumeFollow( * auto-following and the same shard-level stats as the get follower stats API. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats">Documentation * on elastic.co */ @@ -604,7 +604,7 @@ public CcrStatsResponse stats(CcrStatsRequest request) throws IOException, Elast * a function that initializes a builder to create the * {@link CcrStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats">Documentation * on elastic.co */ @@ -618,7 +618,7 @@ public final CcrStatsResponse stats(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats">Documentation * on elastic.co */ @@ -641,7 +641,7 @@ public CcrStatsResponse stats() throws IOException, ElasticsearchException { * regular index is an irreversible operation. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow">Documentation * on elastic.co */ @@ -667,7 +667,7 @@ public UnfollowResponse unfollow(UnfollowRequest request) throws IOException, El * a function that initializes a builder to create the * {@link UnfollowRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ComponentTemplateNode.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ComponentTemplateNode.java index a591b9af0..fbc48ce34 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ComponentTemplateNode.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ComponentTemplateNode.java @@ -31,6 +31,7 @@ import co.elastic.clients.util.ObjectBuilder; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; import java.lang.Long; import java.lang.String; import java.util.Map; @@ -70,6 +71,9 @@ public 
class ComponentTemplateNode implements JsonpSerializable { private final Map meta; + @Nullable + private final Boolean deprecated; + // --------------------------------------------------------------------------------------------- private ComponentTemplateNode(Builder builder) { @@ -77,6 +81,7 @@ private ComponentTemplateNode(Builder builder) { this.template = ApiTypeHelper.requireNonNull(builder.template, this, "template"); this.version = builder.version; this.meta = ApiTypeHelper.unmodifiable(builder.meta); + this.deprecated = builder.deprecated; } @@ -106,6 +111,14 @@ public final Map meta() { return this.meta; } + /** + * API name: {@code deprecated} + */ + @Nullable + public final Boolean deprecated() { + return this.deprecated; + } + /** * Serialize this object to JSON. */ @@ -136,6 +149,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeEnd(); } + if (this.deprecated != null) { + generator.writeKey("deprecated"); + generator.write(this.deprecated); + + } } @@ -161,6 +179,9 @@ public static class Builder extends WithJsonObjectBuilderBase @Nullable private Map meta; + @Nullable + private Boolean deprecated; + /** * Required - API name: {@code template} */ @@ -205,6 +226,14 @@ public final Builder meta(String key, JsonData value) { return this; } + /** + * API name: {@code deprecated} + */ + public final Builder deprecated(@Nullable Boolean value) { + this.deprecated = value; + return this; + } + @Override protected Builder self() { return this; @@ -236,6 +265,7 @@ protected static void setupComponentTemplateNodeDeserializer(ObjectDeserializer< op.add(Builder::template, ComponentTemplateSummary._DESERIALIZER, "template"); op.add(Builder::version, JsonpDeserializer.longDeserializer(), "version"); op.add(Builder::meta, JsonpDeserializer.stringMapDeserializer(JsonData._DESERIALIZER), "_meta"); + op.add(Builder::deprecated, JsonpDeserializer.booleanDeserializer(), "deprecated"); } diff --git 
a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java index 7a8560ef5..eeadc9356 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java @@ -80,7 +80,7 @@ public ElasticsearchClusterAsyncClient withTransportOptions(@Nullable TransportO * when you might expect otherwise. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain">Documentation * on elastic.co */ @@ -104,7 +104,7 @@ public CompletableFuture allocationExplain(Allocation * a function that initializes a builder to create the * {@link AllocationExplainRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain">Documentation * on elastic.co */ @@ -123,7 +123,7 @@ public final CompletableFuture allocationExplain( * when you might expect otherwise. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain">Documentation * on elastic.co */ @@ -140,7 +140,7 @@ public CompletableFuture allocationExplain() { * aliases. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -161,7 +161,7 @@ public CompletableFuture deleteComponentTemplat * a function that initializes a builder to create the * {@link DeleteComponentTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -177,7 +177,7 @@ public final CompletableFuture deleteComponentT * voting configuration exclusion list. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions">Documentation * on elastic.co */ @@ -197,7 +197,7 @@ public CompletableFuture deleteVotingConfigExclusions( * a function that initializes a builder to create the * {@link DeleteVotingConfigExclusionsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions">Documentation * on elastic.co */ @@ -211,7 +211,7 @@ public final CompletableFuture deleteVotingConfigExclusions( * voting configuration exclusion list. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions">Documentation * on elastic.co */ @@ -227,7 +227,7 @@ public CompletableFuture deleteVotingConfigExclusions() { * component template exists. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -246,7 +246,7 @@ public CompletableFuture existsComponentTemplate(ExistsComponen * a function that initializes a builder to create the * {@link ExistsComponentTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -261,7 +261,7 @@ public final CompletableFuture existsComponentTemplate( * Get component templates. Get information about component templates. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -279,7 +279,7 @@ public CompletableFuture getComponentTemplate(GetC * a function that initializes a builder to create the * {@link GetComponentTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -292,7 +292,7 @@ public final CompletableFuture getComponentTemplat * Get component templates. Get information about component templates. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -308,7 +308,7 @@ public CompletableFuture getComponentTemplate() { * been explicitly defined. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings">Documentation * on elastic.co */ @@ -327,7 +327,7 @@ public CompletableFuture getSettings(GetClusterSetti * a function that initializes a builder to create the * {@link GetClusterSettingsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings">Documentation * on elastic.co */ @@ -341,7 +341,7 @@ public final CompletableFuture getSettings( * been explicitly defined. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings">Documentation * on elastic.co */ @@ -353,9 +353,11 @@ public CompletableFuture getSettings() { // ----- Endpoint: cluster.health /** - * Get the cluster health status. You can also use the API to get the health - * status of only specified data streams and indices. For data streams, the API - * retrieves the health status of the stream’s backing indices. + * Get the cluster health status. + *

+ * You can also use the API to get the health status of only specified data + * streams and indices. For data streams, the API retrieves the health status of + * the stream’s backing indices. *

* The cluster health status is: green, yellow or red. On the shard level, a red * status indicates that the specific shard is not allocated in the cluster. @@ -368,7 +370,7 @@ public CompletableFuture getSettings() { * controlled by the worst index status. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health">Documentation * on elastic.co */ @@ -380,9 +382,11 @@ public CompletableFuture health(HealthRequest request) { } /** - * Get the cluster health status. You can also use the API to get the health - * status of only specified data streams and indices. For data streams, the API - * retrieves the health status of the stream’s backing indices. + * Get the cluster health status. + *

+ * You can also use the API to get the health status of only specified data + * streams and indices. For data streams, the API retrieves the health status of + * the stream’s backing indices. *

* The cluster health status is: green, yellow or red. On the shard level, a red * status indicates that the specific shard is not allocated in the cluster. @@ -398,7 +402,7 @@ public CompletableFuture health(HealthRequest request) { * a function that initializes a builder to create the * {@link HealthRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health">Documentation * on elastic.co */ @@ -408,9 +412,11 @@ public final CompletableFuture health( } /** - * Get the cluster health status. You can also use the API to get the health - * status of only specified data streams and indices. For data streams, the API - * retrieves the health status of the stream’s backing indices. + * Get the cluster health status. + *

+ * You can also use the API to get the health status of only specified data + * streams and indices. For data streams, the API retrieves the health status of + * the stream’s backing indices. *

* The cluster health status is: green, yellow or red. On the shard level, a red * status indicates that the specific shard is not allocated in the cluster. @@ -423,7 +429,7 @@ public final CompletableFuture health( * controlled by the worst index status. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health">Documentation * on elastic.co */ @@ -438,7 +444,7 @@ public CompletableFuture health() { * Get cluster info. Returns basic information about the cluster. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info">Documentation * on elastic.co */ @@ -456,7 +462,7 @@ public CompletableFuture info(ClusterInfoRequest request) { * a function that initializes a builder to create the * {@link ClusterInfoRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info">Documentation * on elastic.co */ @@ -480,7 +486,7 @@ public final CompletableFuture info( * this task might be reported by both task api and pending cluster tasks API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks">Documentation * on elastic.co */ @@ -507,7 +513,7 @@ public CompletableFuture pendingTasks(PendingTasksRequest * a function that initializes a builder to create the * {@link PendingTasksRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks">Documentation * on elastic.co */ @@ -529,7 +535,7 @@ public final CompletableFuture pendingTasks( * this task might be reported by both task api and pending cluster tasks API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks">Documentation * on elastic.co */ @@ -579,7 +585,7 @@ public CompletableFuture pendingTasks() { * half of the master-eligible nodes. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions">Documentation * on elastic.co */ @@ -632,7 +638,7 @@ public CompletableFuture postVotingConfigExclusions(PostVotingC * a function that initializes a builder to create the * {@link PostVotingConfigExclusionsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions">Documentation * on elastic.co */ @@ -680,7 +686,7 @@ public final CompletableFuture postVotingConfigExclusions( * half of the master-eligible nodes. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions">Documentation * on elastic.co */ @@ -721,7 +727,7 @@ public CompletableFuture postVotingConfigExclusions() { * composed_of list. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -765,7 +771,7 @@ public CompletableFuture putComponentTemplate(PutC * a function that initializes a builder to create the * {@link PutComponentTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -777,9 +783,11 @@ public final CompletableFuture putComponentTemplat // ----- Endpoint: cluster.put_settings /** - * Update the cluster settings. Configure and update dynamic settings on a - * running cluster. You can also configure dynamic settings locally on an - * unstarted or shut down node in elasticsearch.yml. + * Update the cluster settings. + *

+ * Configure and update dynamic settings on a running cluster. You can also + * configure dynamic settings locally on an unstarted or shut down node in + * elasticsearch.yml. *

* Updates made with this API can be persistent, which apply across cluster * restarts, or transient, which reset after a cluster restart. You can also @@ -806,7 +814,7 @@ public final CompletableFuture putComponentTemplat * configuration. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings">Documentation * on elastic.co */ @@ -818,9 +826,11 @@ public CompletableFuture putSettings(PutClusterSetti } /** - * Update the cluster settings. Configure and update dynamic settings on a - * running cluster. You can also configure dynamic settings locally on an - * unstarted or shut down node in elasticsearch.yml. + * Update the cluster settings. + *

+ * Configure and update dynamic settings on a running cluster. You can also + * configure dynamic settings locally on an unstarted or shut down node in + * elasticsearch.yml. *

* Updates made with this API can be persistent, which apply across cluster * restarts, or transient, which reset after a cluster restart. You can also @@ -850,7 +860,7 @@ public CompletableFuture putSettings(PutClusterSetti * a function that initializes a builder to create the * {@link PutClusterSettingsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings">Documentation * on elastic.co */ @@ -860,9 +870,11 @@ public final CompletableFuture putSettings( } /** - * Update the cluster settings. Configure and update dynamic settings on a - * running cluster. You can also configure dynamic settings locally on an - * unstarted or shut down node in elasticsearch.yml. + * Update the cluster settings. + *

+ * Configure and update dynamic settings on a running cluster. You can also + * configure dynamic settings locally on an unstarted or shut down node in + * elasticsearch.yml. *

* Updates made with this API can be persistent, which apply across cluster * restarts, or transient, which reset after a cluster restart. You can also @@ -889,7 +901,7 @@ public final CompletableFuture putSettings( * configuration. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings">Documentation * on elastic.co */ @@ -901,12 +913,26 @@ public CompletableFuture putSettings() { // ----- Endpoint: cluster.remote_info /** - * Get remote cluster information. Get all of the configured remote cluster - * information. This API returns connection and endpoint information keyed by - * the configured remote cluster alias. - * + * Get remote cluster information. + *

+ * Get information about configured remote clusters. The API returns connection + * and endpoint information keyed by the configured remote cluster alias. + *

+ *

+ * info This API returns information that reflects current state on the local + * cluster. The connected field does not necessarily reflect + * whether a remote cluster is down or unavailable, only whether there is + * currently an open connection to it. Elasticsearch does not spontaneously try + * to reconnect to a disconnected remote cluster. To trigger a reconnection, + * attempt a cross-cluster search, ES|QL cross-cluster search, or try the + * resolve + * cluster endpoint. + *

+ *
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info">Documentation * on elastic.co */ public CompletableFuture remoteInfo() { @@ -946,7 +972,7 @@ public CompletableFuture remoteInfo() { * parameter, which will attempt a single retry round for these shards. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute">Documentation * on elastic.co */ @@ -990,7 +1016,7 @@ public CompletableFuture reroute(RerouteRequest request) { * a function that initializes a builder to create the * {@link RerouteRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute">Documentation * on elastic.co */ @@ -1029,7 +1055,7 @@ public final CompletableFuture reroute( * parameter, which will attempt a single retry round for these shards. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute">Documentation * on elastic.co */ @@ -1073,7 +1099,7 @@ public CompletableFuture reroute() { * using other more stable cluster APIs. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state">Documentation * on elastic.co */ @@ -1120,7 +1146,7 @@ public CompletableFuture state(StateRequest request) { * a function that initializes a builder to create the * {@link StateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state">Documentation * on elastic.co */ @@ -1162,7 +1188,7 @@ public final CompletableFuture state( * using other more stable cluster APIs. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state">Documentation * on elastic.co */ @@ -1179,7 +1205,7 @@ public CompletableFuture state() { * (number, roles, os, jvm versions, memory usage, cpu and installed plugins). 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats">Documentation * on elastic.co */ @@ -1199,7 +1225,7 @@ public CompletableFuture stats(ClusterStatsRequest request * a function that initializes a builder to create the * {@link ClusterStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats">Documentation * on elastic.co */ @@ -1214,7 +1240,7 @@ public final CompletableFuture stats( * (number, roles, os, jvm versions, memory usage, cpu and installed plugins). * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java index 0154ac733..73fb4bfad 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java @@ -78,7 +78,7 @@ public ElasticsearchClusterClient withTransportOptions(@Nullable TransportOption * when you might expect otherwise. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain">Documentation * on elastic.co */ @@ -103,7 +103,7 @@ public AllocationExplainResponse allocationExplain(AllocationExplainRequest requ * a function that initializes a builder to create the * {@link AllocationExplainRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain">Documentation * on elastic.co */ @@ -123,7 +123,7 @@ public final AllocationExplainResponse allocationExplain( * when you might expect otherwise. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain">Documentation * on elastic.co */ @@ -140,7 +140,7 @@ public AllocationExplainResponse allocationExplain() throws IOException, Elastic * aliases. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -161,7 +161,7 @@ public DeleteComponentTemplateResponse deleteComponentTemplate(DeleteComponentTe * a function that initializes a builder to create the * {@link DeleteComponentTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -178,7 +178,7 @@ public final DeleteComponentTemplateResponse deleteComponentTemplate( * voting configuration exclusion list. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions">Documentation * on elastic.co */ @@ -198,7 +198,7 @@ public BooleanResponse deleteVotingConfigExclusions(DeleteVotingConfigExclusions * a function that initializes a builder to create the * {@link DeleteVotingConfigExclusionsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions">Documentation * on elastic.co */ @@ -213,7 +213,7 @@ public final BooleanResponse deleteVotingConfigExclusions( * voting configuration exclusion list. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions">Documentation * on elastic.co */ @@ -229,7 +229,7 @@ public BooleanResponse deleteVotingConfigExclusions() throws IOException, Elasti * component template exists. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -249,7 +249,7 @@ public BooleanResponse existsComponentTemplate(ExistsComponentTemplateRequest re * a function that initializes a builder to create the * {@link ExistsComponentTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -265,7 +265,7 @@ public final BooleanResponse existsComponentTemplate( * Get component templates. Get information about component templates. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -284,7 +284,7 @@ public GetComponentTemplateResponse getComponentTemplate(GetComponentTemplateReq * a function that initializes a builder to create the * {@link GetComponentTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -298,7 +298,7 @@ public final GetComponentTemplateResponse getComponentTemplate( * Get component templates. Get information about component templates. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -314,7 +314,7 @@ public GetComponentTemplateResponse getComponentTemplate() throws IOException, E * been explicitly defined. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings">Documentation * on elastic.co */ @@ -334,7 +334,7 @@ public GetClusterSettingsResponse getSettings(GetClusterSettingsRequest request) * a function that initializes a builder to create the * {@link GetClusterSettingsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings">Documentation * on elastic.co */ @@ -349,7 +349,7 @@ public final GetClusterSettingsResponse getSettings( * been explicitly defined. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings">Documentation * on elastic.co */ @@ -361,9 +361,11 @@ public GetClusterSettingsResponse getSettings() throws IOException, Elasticsearc // ----- Endpoint: cluster.health /** - * Get the cluster health status. You can also use the API to get the health - * status of only specified data streams and indices. For data streams, the API - * retrieves the health status of the stream’s backing indices. + * Get the cluster health status. + *

+ * You can also use the API to get the health status of only specified data + * streams and indices. For data streams, the API retrieves the health status of + * the stream’s backing indices. *

* The cluster health status is: green, yellow or red. On the shard level, a red * status indicates that the specific shard is not allocated in the cluster. @@ -376,7 +378,7 @@ public GetClusterSettingsResponse getSettings() throws IOException, Elasticsearc * controlled by the worst index status. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health">Documentation * on elastic.co */ @@ -388,9 +390,11 @@ public HealthResponse health(HealthRequest request) throws IOException, Elastics } /** - * Get the cluster health status. You can also use the API to get the health - * status of only specified data streams and indices. For data streams, the API - * retrieves the health status of the stream’s backing indices. + * Get the cluster health status. + *

+ * You can also use the API to get the health status of only specified data + * streams and indices. For data streams, the API retrieves the health status of + * the stream’s backing indices. *

* The cluster health status is: green, yellow or red. On the shard level, a red * status indicates that the specific shard is not allocated in the cluster. @@ -406,7 +410,7 @@ public HealthResponse health(HealthRequest request) throws IOException, Elastics * a function that initializes a builder to create the * {@link HealthRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health">Documentation * on elastic.co */ @@ -416,9 +420,11 @@ public final HealthResponse health(Function + * You can also use the API to get the health status of only specified data + * streams and indices. For data streams, the API retrieves the health status of + * the stream’s backing indices. *

* The cluster health status is: green, yellow or red. On the shard level, a red * status indicates that the specific shard is not allocated in the cluster. @@ -431,7 +437,7 @@ public final HealthResponse health(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health">Documentation * on elastic.co */ @@ -446,7 +452,7 @@ public HealthResponse health() throws IOException, ElasticsearchException { * Get cluster info. Returns basic information about the cluster. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info">Documentation * on elastic.co */ @@ -464,7 +470,7 @@ public ClusterInfoResponse info(ClusterInfoRequest request) throws IOException, * a function that initializes a builder to create the * {@link ClusterInfoRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info">Documentation * on elastic.co */ @@ -488,7 +494,7 @@ public final ClusterInfoResponse info(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks">Documentation * on elastic.co */ @@ -515,7 +521,7 @@ public PendingTasksResponse pendingTasks(PendingTasksRequest request) throws IOE * a function that initializes a builder to create the * {@link PendingTasksRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks">Documentation * on elastic.co */ @@ -538,7 +544,7 @@ public final PendingTasksResponse pendingTasks( * this task might be reported by both task api and pending cluster tasks API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks">Documentation * on elastic.co */ @@ -588,7 +594,7 @@ public PendingTasksResponse pendingTasks() throws IOException, ElasticsearchExce * half of the master-eligible nodes. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions">Documentation * on elastic.co */ @@ -642,7 +648,7 @@ public BooleanResponse postVotingConfigExclusions(PostVotingConfigExclusionsRequ * a function that initializes a builder to create the * {@link PostVotingConfigExclusionsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions">Documentation * on elastic.co */ @@ -691,7 +697,7 @@ public final BooleanResponse postVotingConfigExclusions( * half of the master-eligible nodes. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions">Documentation * on elastic.co */ @@ -732,7 +738,7 @@ public BooleanResponse postVotingConfigExclusions() throws IOException, Elastics * composed_of list. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -777,7 +783,7 @@ public PutComponentTemplateResponse putComponentTemplate(PutComponentTemplateReq * a function that initializes a builder to create the * {@link PutComponentTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template">Documentation * on elastic.co */ @@ -790,9 +796,11 @@ public final PutComponentTemplateResponse putComponentTemplate( // ----- Endpoint: cluster.put_settings /** - * Update the cluster settings. Configure and update dynamic settings on a - * running cluster. You can also configure dynamic settings locally on an - * unstarted or shut down node in elasticsearch.yml. + * Update the cluster settings. + *

+ * Configure and update dynamic settings on a running cluster. You can also + * configure dynamic settings locally on an unstarted or shut down node in + * elasticsearch.yml. *

* Updates made with this API can be persistent, which apply across cluster * restarts, or transient, which reset after a cluster restart. You can also @@ -819,7 +827,7 @@ public final PutComponentTemplateResponse putComponentTemplate( * configuration. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings">Documentation * on elastic.co */ @@ -832,9 +840,11 @@ public PutClusterSettingsResponse putSettings(PutClusterSettingsRequest request) } /** - * Update the cluster settings. Configure and update dynamic settings on a - * running cluster. You can also configure dynamic settings locally on an - * unstarted or shut down node in elasticsearch.yml. + * Update the cluster settings. + *

+ * Configure and update dynamic settings on a running cluster. You can also + * configure dynamic settings locally on an unstarted or shut down node in + * elasticsearch.yml. *

* Updates made with this API can be persistent, which apply across cluster * restarts, or transient, which reset after a cluster restart. You can also @@ -864,7 +874,7 @@ public PutClusterSettingsResponse putSettings(PutClusterSettingsRequest request) * a function that initializes a builder to create the * {@link PutClusterSettingsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings">Documentation * on elastic.co */ @@ -875,9 +885,11 @@ public final PutClusterSettingsResponse putSettings( } /** - * Update the cluster settings. Configure and update dynamic settings on a - * running cluster. You can also configure dynamic settings locally on an - * unstarted or shut down node in elasticsearch.yml. + * Update the cluster settings. + *

+ * Configure and update dynamic settings on a running cluster. You can also + * configure dynamic settings locally on an unstarted or shut down node in + * elasticsearch.yml. *

* Updates made with this API can be persistent, which apply across cluster * restarts, or transient, which reset after a cluster restart. You can also @@ -904,7 +916,7 @@ public final PutClusterSettingsResponse putSettings( * configuration. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings">Documentation * on elastic.co */ @@ -916,12 +928,26 @@ public PutClusterSettingsResponse putSettings() throws IOException, Elasticsearc // ----- Endpoint: cluster.remote_info /** - * Get remote cluster information. Get all of the configured remote cluster - * information. This API returns connection and endpoint information keyed by - * the configured remote cluster alias. - * + * Get remote cluster information. + *

+ * Get information about configured remote clusters. The API returns connection + * and endpoint information keyed by the configured remote cluster alias. + *

+ *

+ * info This API returns information that reflects current state on the local + * cluster. The connected field does not necessarily reflect + * whether a remote cluster is down or unavailable, only whether there is + * currently an open connection to it. Elasticsearch does not spontaneously try + * to reconnect to a disconnected remote cluster. To trigger a reconnection, + * attempt a cross-cluster search, ES|QL cross-cluster search, or try the + * resolve + * cluster endpoint. + *

+ *
+ * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info">Documentation * on elastic.co */ public RemoteInfoResponse remoteInfo() throws IOException, ElasticsearchException { @@ -961,7 +987,7 @@ public RemoteInfoResponse remoteInfo() throws IOException, ElasticsearchExceptio * parameter, which will attempt a single retry round for these shards. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute">Documentation * on elastic.co */ @@ -1005,7 +1031,7 @@ public RerouteResponse reroute(RerouteRequest request) throws IOException, Elast * a function that initializes a builder to create the * {@link RerouteRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute">Documentation * on elastic.co */ @@ -1044,7 +1070,7 @@ public final RerouteResponse reroute(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute">Documentation * on elastic.co */ @@ -1088,7 +1114,7 @@ public RerouteResponse reroute() throws IOException, ElasticsearchException { * using other more stable cluster APIs. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state">Documentation * on elastic.co */ @@ -1135,7 +1161,7 @@ public StateResponse state(StateRequest request) throws IOException, Elasticsear * a function that initializes a builder to create the * {@link StateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state">Documentation * on elastic.co */ @@ -1177,7 +1203,7 @@ public final StateResponse state(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state">Documentation * on elastic.co */ @@ -1194,7 +1220,7 @@ public StateResponse state() throws IOException, ElasticsearchException { * (number, roles, os, jvm versions, memory usage, cpu and installed plugins). * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats">Documentation * on elastic.co */ @@ -1214,7 +1240,7 @@ public ClusterStatsResponse stats(ClusterStatsRequest request) throws IOExceptio * a function that initializes a builder to create the * {@link ClusterStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats">Documentation * on elastic.co */ @@ -1230,7 +1256,7 @@ public final ClusterStatsResponse stats( * (number, roles, os, jvm versions, memory usage, cpu and installed plugins). 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/HealthRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/HealthRequest.java index e3c230002..4059f042c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/HealthRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/HealthRequest.java @@ -64,9 +64,11 @@ // typedef: cluster.health.Request /** - * Get the cluster health status. You can also use the API to get the health - * status of only specified data streams and indices. For data streams, the API - * retrieves the health status of the stream’s backing indices. + * Get the cluster health status. + *

+ * You can also use the API to get the health status of only specified data + * streams and indices. For data streams, the API retrieves the health status of + * the stream’s backing indices. *

* The cluster health status is: green, yellow or red. On the shard level, a red * status indicates that the specific shard is not allocated in the cluster. diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutClusterSettingsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutClusterSettingsRequest.java index d1d0f6cfd..956e8dc83 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutClusterSettingsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutClusterSettingsRequest.java @@ -61,9 +61,11 @@ // typedef: cluster.put_settings.Request /** - * Update the cluster settings. Configure and update dynamic settings on a - * running cluster. You can also configure dynamic settings locally on an - * unstarted or shut down node in elasticsearch.yml. + * Update the cluster settings. + *

+ * Configure and update dynamic settings on a running cluster. You can also + * configure dynamic settings locally on an unstarted or shut down node in + * elasticsearch.yml. *

* Updates made with this API can be persistent, which apply across cluster * restarts, or transient, which reset after a cluster restart. You can also diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/RemoteInfoRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/RemoteInfoRequest.java index 7fcef9eb8..53ce6e1f9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/RemoteInfoRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/RemoteInfoRequest.java @@ -50,10 +50,24 @@ // typedef: cluster.remote_info.Request /** - * Get remote cluster information. Get all of the configured remote cluster - * information. This API returns connection and endpoint information keyed by - * the configured remote cluster alias. - * + * Get remote cluster information. + *

+ * Get information about configured remote clusters. The API returns connection + * and endpoint information keyed by the configured remote cluster alias. + *

+ *

+ * info This API returns information that reflects current state on the local + * cluster. The connected field does not necessarily reflect + * whether a remote cluster is down or unavailable, only whether there is + * currently an open connection to it. Elasticsearch does not spontaneously try + * to reconnect to a disconnected remote cluster. To trigger a reconnection, + * attempt a cross-cluster search, ES|QL cross-cluster search, or try the + * resolve + * cluster endpoint. + *

+ *
+ * * @see API * specification */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/remote_info/ClusterRemoteProxyInfo.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/remote_info/ClusterRemoteProxyInfo.java index 7bace0409..cddb90687 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/remote_info/ClusterRemoteProxyInfo.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/remote_info/ClusterRemoteProxyInfo.java @@ -77,6 +77,9 @@ public class ClusterRemoteProxyInfo implements ClusterRemoteInfoVariant, JsonpSe private final int maxProxySocketConnections; + @Nullable + private final String clusterCredentials; + // --------------------------------------------------------------------------------------------- private ClusterRemoteProxyInfo(Builder builder) { @@ -91,6 +94,7 @@ private ClusterRemoteProxyInfo(Builder builder) { "numProxySocketsConnected"); this.maxProxySocketConnections = ApiTypeHelper.requireNonNull(builder.maxProxySocketConnections, this, "maxProxySocketConnections"); + this.clusterCredentials = builder.clusterCredentials; } @@ -107,28 +111,42 @@ public ClusterRemoteInfo.Kind _clusterRemoteInfoKind() { } /** - * Required - API name: {@code connected} + * Required - If it is true, there is at least one open connection + * to the remote cluster. If it is false, it means that the cluster + * no longer has an open connection to the remote cluster. It does not + * necessarily mean that the remote cluster is down or unavailable, just that at + * some point a connection was lost. + *

+ * API name: {@code connected} */ public final boolean connected() { return this.connected; } /** - * Required - API name: {@code initial_connect_timeout} + * Required - The initial connect timeout for remote cluster connections. + *

+ * API name: {@code initial_connect_timeout} */ public final Time initialConnectTimeout() { return this.initialConnectTimeout; } /** - * Required - API name: {@code skip_unavailable} + * Required - If true, cross-cluster search skips the remote + * cluster when its nodes are unavailable during the search and ignores errors + * returned by the remote cluster. + *

+ * API name: {@code skip_unavailable} */ public final boolean skipUnavailable() { return this.skipUnavailable; } /** - * Required - API name: {@code proxy_address} + * Required - The address for remote connections when proxy mode is configured. + *

+ * API name: {@code proxy_address} */ public final String proxyAddress() { return this.proxyAddress; @@ -142,19 +160,37 @@ public final String serverName() { } /** - * Required - API name: {@code num_proxy_sockets_connected} + * Required - The number of open socket connections to the remote cluster when + * proxy mode is configured. + *

+ * API name: {@code num_proxy_sockets_connected} */ public final int numProxySocketsConnected() { return this.numProxySocketsConnected; } /** - * Required - API name: {@code max_proxy_socket_connections} + * Required - The maximum number of socket connections to the remote cluster + * when proxy mode is configured. + *

+ * API name: {@code max_proxy_socket_connections} */ public final int maxProxySocketConnections() { return this.maxProxySocketConnections; } + /** + * This field presents and has value of ::es_redacted:: only when the remote + * cluster is configured with the API key based model. Otherwise, the field is + * not present. + *

+ * API name: {@code cluster_credentials} + */ + @Nullable + public final String clusterCredentials() { + return this.clusterCredentials; + } + /** * Serialize this object to JSON. */ @@ -189,6 +225,12 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("max_proxy_socket_connections"); generator.write(this.maxProxySocketConnections); + if (this.clusterCredentials != null) { + generator.writeKey("cluster_credentials"); + generator.write(this.clusterCredentials); + + } + } @Override @@ -219,8 +261,17 @@ public static class Builder extends WithJsonObjectBuilderBase private Integer maxProxySocketConnections; + @Nullable + private String clusterCredentials; + /** - * Required - API name: {@code connected} + * Required - If it is true, there is at least one open connection + * to the remote cluster. If it is false, it means that the cluster + * no longer has an open connection to the remote cluster. It does not + * necessarily mean that the remote cluster is down or unavailable, just that at + * some point a connection was lost. + *

+ * API name: {@code connected} */ public final Builder connected(boolean value) { this.connected = value; @@ -228,7 +279,9 @@ public final Builder connected(boolean value) { } /** - * Required - API name: {@code initial_connect_timeout} + * Required - The initial connect timeout for remote cluster connections. + *

+ * API name: {@code initial_connect_timeout} */ public final Builder initialConnectTimeout(Time value) { this.initialConnectTimeout = value; @@ -236,14 +289,20 @@ public final Builder initialConnectTimeout(Time value) { } /** - * Required - API name: {@code initial_connect_timeout} + * Required - The initial connect timeout for remote cluster connections. + *

+ * API name: {@code initial_connect_timeout} */ public final Builder initialConnectTimeout(Function> fn) { return this.initialConnectTimeout(fn.apply(new Time.Builder()).build()); } /** - * Required - API name: {@code skip_unavailable} + * Required - If true, cross-cluster search skips the remote + * cluster when its nodes are unavailable during the search and ignores errors + * returned by the remote cluster. + *

+ * API name: {@code skip_unavailable} */ public final Builder skipUnavailable(boolean value) { this.skipUnavailable = value; @@ -251,7 +310,9 @@ public final Builder skipUnavailable(boolean value) { } /** - * Required - API name: {@code proxy_address} + * Required - The address for remote connections when proxy mode is configured. + *

+ * API name: {@code proxy_address} */ public final Builder proxyAddress(String value) { this.proxyAddress = value; @@ -267,7 +328,10 @@ public final Builder serverName(String value) { } /** - * Required - API name: {@code num_proxy_sockets_connected} + * Required - The number of open socket connections to the remote cluster when + * proxy mode is configured. + *

+ * API name: {@code num_proxy_sockets_connected} */ public final Builder numProxySocketsConnected(int value) { this.numProxySocketsConnected = value; @@ -275,13 +339,28 @@ public final Builder numProxySocketsConnected(int value) { } /** - * Required - API name: {@code max_proxy_socket_connections} + * Required - The maximum number of socket connections to the remote cluster + * when proxy mode is configured. + *

+ * API name: {@code max_proxy_socket_connections} */ public final Builder maxProxySocketConnections(int value) { this.maxProxySocketConnections = value; return this; } + /** + * This field presents and has value of ::es_redacted:: only when the remote + * cluster is configured with the API key based model. Otherwise, the field is + * not present. + *

+ * API name: {@code cluster_credentials} + */ + public final Builder clusterCredentials(@Nullable String value) { + this.clusterCredentials = value; + return this; + } + @Override protected Builder self() { return this; @@ -320,6 +399,7 @@ protected static void setupClusterRemoteProxyInfoDeserializer( "num_proxy_sockets_connected"); op.add(Builder::maxProxySocketConnections, JsonpDeserializer.integerDeserializer(), "max_proxy_socket_connections"); + op.add(Builder::clusterCredentials, JsonpDeserializer.stringDeserializer(), "cluster_credentials"); op.ignore("mode"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/remote_info/ClusterRemoteSniffInfo.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/remote_info/ClusterRemoteSniffInfo.java index 36729d436..63a4cc049 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/remote_info/ClusterRemoteSniffInfo.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/remote_info/ClusterRemoteSniffInfo.java @@ -105,42 +105,63 @@ public ClusterRemoteInfo.Kind _clusterRemoteInfoKind() { } /** - * Required - API name: {@code connected} + * Required - If it is true, there is at least one open connection + * to the remote cluster. If it is false, it means that the cluster + * no longer has an open connection to the remote cluster. It does not + * necessarily mean that the remote cluster is down or unavailable, just that at + * some point a connection was lost. + *

+ * API name: {@code connected} */ public final boolean connected() { return this.connected; } /** - * Required - API name: {@code max_connections_per_cluster} + * Required - The maximum number of connections maintained for the remote + * cluster when sniff mode is configured. + *

+ * API name: {@code max_connections_per_cluster} */ public final int maxConnectionsPerCluster() { return this.maxConnectionsPerCluster; } /** - * Required - API name: {@code num_nodes_connected} + * Required - The number of connected nodes in the remote cluster when sniff + * mode is configured. + *

+ * API name: {@code num_nodes_connected} */ public final long numNodesConnected() { return this.numNodesConnected; } /** - * Required - API name: {@code initial_connect_timeout} + * Required - The initial connect timeout for remote cluster connections. + *

+ * API name: {@code initial_connect_timeout} */ public final Time initialConnectTimeout() { return this.initialConnectTimeout; } /** - * Required - API name: {@code skip_unavailable} + * Required - If true, cross-cluster search skips the remote + * cluster when its nodes are unavailable during the search and ignores errors + * returned by the remote cluster. + *

+ * API name: {@code skip_unavailable} */ public final boolean skipUnavailable() { return this.skipUnavailable; } /** - * Required - API name: {@code seeds} + * Required - The initial seed transport addresses of the remote cluster when + * sniff mode is configured. + *

+ * API name: {@code seeds} */ public final List seeds() { return this.seeds; @@ -214,7 +235,13 @@ public static class Builder extends WithJsonObjectBuilderBase private List seeds; /** - * Required - API name: {@code connected} + * Required - If it is true, there is at least one open connection + * to the remote cluster. If it is false, it means that the cluster + * no longer has an open connection to the remote cluster. It does not + * necessarily mean that the remote cluster is down or unavailable, just that at + * some point a connection was lost. + *

+ * API name: {@code connected} */ public final Builder connected(boolean value) { this.connected = value; @@ -222,7 +249,10 @@ public final Builder connected(boolean value) { } /** - * Required - API name: {@code max_connections_per_cluster} + * Required - The maximum number of connections maintained for the remote + * cluster when sniff mode is configured. + *

+ * API name: {@code max_connections_per_cluster} */ public final Builder maxConnectionsPerCluster(int value) { this.maxConnectionsPerCluster = value; @@ -230,7 +260,10 @@ public final Builder maxConnectionsPerCluster(int value) { } /** - * Required - API name: {@code num_nodes_connected} + * Required - The number of connected nodes in the remote cluster when sniff + * mode is configured. + *

+ * API name: {@code num_nodes_connected} */ public final Builder numNodesConnected(long value) { this.numNodesConnected = value; @@ -238,7 +271,9 @@ public final Builder numNodesConnected(long value) { } /** - * Required - API name: {@code initial_connect_timeout} + * Required - The initial connect timeout for remote cluster connections. + *

+ * API name: {@code initial_connect_timeout} */ public final Builder initialConnectTimeout(Time value) { this.initialConnectTimeout = value; @@ -246,14 +281,20 @@ public final Builder initialConnectTimeout(Time value) { } /** - * Required - API name: {@code initial_connect_timeout} + * Required - The initial connect timeout for remote cluster connections. + *

+ * API name: {@code initial_connect_timeout} */ public final Builder initialConnectTimeout(Function> fn) { return this.initialConnectTimeout(fn.apply(new Time.Builder()).build()); } /** - * Required - API name: {@code skip_unavailable} + * Required - If true, cross-cluster search skips the remote + * cluster when its nodes are unavailable during the search and ignores errors + * returned by the remote cluster. + *

+ * API name: {@code skip_unavailable} */ public final Builder skipUnavailable(boolean value) { this.skipUnavailable = value; @@ -261,7 +302,10 @@ public final Builder skipUnavailable(boolean value) { } /** - * Required - API name: {@code seeds} + * Required - The initial seed transport addresses of the remote cluster when + * sniff mode is configured. + *

+ * API name: {@code seeds} *

* Adds all elements of list to seeds. */ @@ -271,7 +315,10 @@ public final Builder seeds(List list) { } /** - * Required - API name: {@code seeds} + * Required - The initial seed transport addresses of the remote cluster when + * sniff mode is configured. + *

+ * API name: {@code seeds} *

* Adds one or more values to seeds. */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/reroute/CommandAllocateReplicaAction.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/reroute/CommandAllocateReplicaAction.java index 82ddfdd12..e623a38c3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/reroute/CommandAllocateReplicaAction.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/reroute/CommandAllocateReplicaAction.java @@ -56,7 +56,7 @@ /** * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html">Documentation * on elastic.co * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/Connector.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/Connector.java index e33d13fa0..f20e44bf3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/Connector.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/Connector.java @@ -75,6 +75,8 @@ public class Connector implements JsonpSerializable { private final Map customScheduling; + private final boolean deleted; + @Nullable private final String description; @@ -156,6 +158,7 @@ protected Connector(AbstractBuilder builder) { this.apiKeySecretId = builder.apiKeySecretId; this.configuration = ApiTypeHelper.unmodifiableRequired(builder.configuration, this, "configuration"); this.customScheduling = ApiTypeHelper.unmodifiableRequired(builder.customScheduling, this, "customScheduling"); + this.deleted = ApiTypeHelper.requireNonNull(builder.deleted, this, "deleted"); this.description = builder.description; this.error = builder.error; this.features = builder.features; @@ -219,6 +222,13 @@ public final Map customScheduling() { return this.customScheduling; } + /** + * Required - API name: {@code deleted} + */ + public final boolean deleted() { + return 
this.deleted; + } + /** * API name: {@code description} */ @@ -465,6 +475,9 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeEnd(); } + generator.writeKey("deleted"); + generator.write(this.deleted); + if (this.description != null) { generator.writeKey("description"); generator.write(this.description); @@ -629,6 +642,8 @@ public abstract static class AbstractBuilder customScheduling; + private Boolean deleted; + @Nullable private String description; @@ -778,6 +793,14 @@ public final BuilderT customScheduling(String key, return customScheduling(key, fn.apply(new CustomScheduling.Builder()).build()); } + /** + * Required - API name: {@code deleted} + */ + public final BuilderT deleted(boolean value) { + this.deleted = value; + return self(); + } + /** * API name: {@code description} */ @@ -1050,6 +1073,7 @@ protected static > void setupConnecto JsonpDeserializer.stringMapDeserializer(ConnectorConfigProperties._DESERIALIZER), "configuration"); op.add(AbstractBuilder::customScheduling, JsonpDeserializer.stringMapDeserializer(CustomScheduling._DESERIALIZER), "custom_scheduling"); + op.add(AbstractBuilder::deleted, JsonpDeserializer.booleanDeserializer(), "deleted"); op.add(AbstractBuilder::description, JsonpDeserializer.stringDeserializer(), "description"); op.add(AbstractBuilder::error, JsonpDeserializer.stringDeserializer(), "error"); op.add(AbstractBuilder::features, ConnectorFeatures._DESERIALIZER, "features"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/DeleteConnectorRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/DeleteConnectorRequest.java index 87f611963..fa215b16e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/DeleteConnectorRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/DeleteConnectorRequest.java @@ -73,12 +73,16 @@ public class DeleteConnectorRequest extends 
RequestBase { @Nullable private final Boolean deleteSyncJobs; + @Nullable + private final Boolean hard; + // --------------------------------------------------------------------------------------------- private DeleteConnectorRequest(Builder builder) { this.connectorId = ApiTypeHelper.requireNonNull(builder.connectorId, this, "connectorId"); this.deleteSyncJobs = builder.deleteSyncJobs; + this.hard = builder.hard; } @@ -106,6 +110,16 @@ public final Boolean deleteSyncJobs() { return this.deleteSyncJobs; } + /** + * A flag indicating if the connector should be hard deleted. + *

+ * API name: {@code hard} + */ + @Nullable + public final Boolean hard() { + return this.hard; + } + // --------------------------------------------------------------------------------------------- /** @@ -120,6 +134,9 @@ public static class Builder extends RequestBase.AbstractBuilder @Nullable private Boolean deleteSyncJobs; + @Nullable + private Boolean hard; + /** * Required - The unique identifier of the connector to be deleted *

@@ -141,6 +158,16 @@ public final Builder deleteSyncJobs(@Nullable Boolean value) { return this; } + /** + * A flag indicating if the connector should be hard deleted. + *

+ * API name: {@code hard} + */ + public final Builder hard(@Nullable Boolean value) { + this.hard = value; + return this; + } + @Override protected Builder self() { return this; @@ -213,6 +240,9 @@ public DeleteConnectorRequest build() { if (request.deleteSyncJobs != null) { params.put("delete_sync_jobs", String.valueOf(request.deleteSyncJobs)); } + if (request.hard != null) { + params.put("hard", String.valueOf(request.hard)); + } return params; }, SimpleEndpoint.emptyMap(), false, DeleteConnectorResponse._DESERIALIZER); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorAsyncClient.java index 724106d44..e9cefd4f9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorAsyncClient.java @@ -76,7 +76,7 @@ public ElasticsearchConnectorAsyncClient withTransportOptions(@Nullable Transpor * current timestamp. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in">Documentation * on elastic.co */ @@ -97,7 +97,7 @@ public CompletableFuture checkIn(CheckInRequest request) { * a function that initializes a builder to create the * {@link CheckInRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in">Documentation * on elastic.co */ @@ -117,7 +117,7 @@ public final CompletableFuture checkIn( * to be removed manually. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete">Documentation * on elastic.co */ @@ -140,7 +140,7 @@ public CompletableFuture delete(DeleteConnectorRequest * a function that initializes a builder to create the * {@link DeleteConnectorRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete">Documentation * on elastic.co */ @@ -157,7 +157,7 @@ public final CompletableFuture delete( * Get the details about a connector. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get">Documentation * on elastic.co */ @@ -177,7 +177,7 @@ public CompletableFuture get(GetConnectorRequest request) * a function that initializes a builder to create the * {@link GetConnectorRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get">Documentation * on elastic.co */ @@ -194,7 +194,7 @@ public final CompletableFuture get( * Get information about all connectors. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list">Documentation * on elastic.co */ @@ -214,7 +214,7 @@ public CompletableFuture list(ListRequest request) { * a function that initializes a builder to create the * {@link ListRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list">Documentation * on elastic.co */ @@ -228,7 +228,7 @@ public final CompletableFuture list(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list">Documentation * on elastic.co */ @@ -249,7 +249,7 @@ public CompletableFuture list() { * self-managed on your infrastructure. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put">Documentation * on elastic.co */ @@ -273,7 +273,7 @@ public CompletableFuture post(PostRequest request) { * a function that initializes a builder to create the * {@link PostRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put">Documentation * on elastic.co */ @@ -291,7 +291,7 @@ public final CompletableFuture post(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put">Documentation * on elastic.co */ @@ -306,7 +306,7 @@ public CompletableFuture post() { * Create or update a connector. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put">Documentation * on elastic.co */ @@ -324,7 +324,7 @@ public CompletableFuture put(PutRequest request) { * a function that initializes a builder to create the * {@link PutRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put">Documentation * on elastic.co */ @@ -336,7 +336,7 @@ public final CompletableFuture put(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put">Documentation * on elastic.co */ @@ -356,7 +356,7 @@ public CompletableFuture put() { * cancelled. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel">Documentation * on elastic.co */ @@ -379,7 +379,7 @@ public CompletableFuture syncJobCancel(SyncJobCancelReque * a function that initializes a builder to create the * {@link SyncJobCancelRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel">Documentation * on elastic.co */ @@ -400,7 +400,7 @@ public final CompletableFuture syncJobCancel( * on Elastic Cloud for Elastic managed connectors. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in">Documentation * on elastic.co */ @@ -424,7 +424,7 @@ public CompletableFuture syncJobCheckIn(SyncJobCheckInRe * a function that initializes a builder to create the * {@link SyncJobCheckInRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in">Documentation * on elastic.co */ @@ -450,7 +450,7 @@ public final CompletableFuture syncJobCheckIn( * on Elastic Cloud for Elastic managed connectors. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim">Documentation * on elastic.co */ @@ -479,7 +479,7 @@ public CompletableFuture syncJobClaim(SyncJobClaimRequest * a function that initializes a builder to create the * {@link SyncJobClaimRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim">Documentation * on elastic.co */ @@ -497,7 +497,7 @@ public final CompletableFuture syncJobClaim( * action that is not recoverable. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete">Documentation * on elastic.co */ @@ -518,7 +518,7 @@ public CompletableFuture syncJobDelete(SyncJobDeleteReque * a function that initializes a builder to create the * {@link SyncJobDeleteRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete">Documentation * on elastic.co */ @@ -538,7 +538,7 @@ public final CompletableFuture syncJobDelete( * on Elastic Cloud for Elastic managed connectors. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error">Documentation * on elastic.co */ @@ -561,7 +561,7 @@ public CompletableFuture syncJobError(SyncJobErrorRequest * a function that initializes a builder to create the * {@link SyncJobErrorRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error">Documentation * on elastic.co */ @@ -576,7 +576,7 @@ public final CompletableFuture syncJobError( * Get a connector sync job. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get">Documentation * on elastic.co */ @@ -594,7 +594,7 @@ public CompletableFuture syncJobGet(SyncJobGetRequest reques * a function that initializes a builder to create the * {@link SyncJobGetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get">Documentation * on elastic.co */ @@ -612,7 +612,7 @@ public final CompletableFuture syncJobGet( * date in ascending order. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list">Documentation * on elastic.co */ @@ -633,7 +633,7 @@ public CompletableFuture syncJobList(SyncJobListRequest req * a function that initializes a builder to create the * {@link SyncJobListRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list">Documentation * on elastic.co */ @@ -649,7 +649,7 @@ public final CompletableFuture syncJobList( * date in ascending order. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list">Documentation * on elastic.co */ @@ -667,7 +667,7 @@ public CompletableFuture syncJobList() { * counters and timestamps with default values. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post">Documentation * on elastic.co */ @@ -688,7 +688,7 @@ public CompletableFuture syncJobPost(SyncJobPostRequest req * a function that initializes a builder to create the * {@link SyncJobPostRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post">Documentation * on elastic.co */ @@ -711,7 +711,7 @@ public final CompletableFuture syncJobPost( * on Elastic Cloud for Elastic managed connectors. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats">Documentation * on elastic.co */ @@ -737,7 +737,7 @@ public CompletableFuture syncJobUpdateStats(SyncJobU * a function that initializes a builder to create the * {@link SyncJobUpdateStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats">Documentation * on elastic.co */ @@ -754,7 +754,7 @@ public final CompletableFuture syncJobUpdateStats( * Activates the valid draft filtering for a connector. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering">Documentation * on elastic.co */ @@ -775,7 +775,7 @@ public CompletableFuture updateActiveFiltering( * a function that initializes a builder to create the * {@link UpdateActiveFilteringRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering">Documentation * on elastic.co */ @@ -796,7 +796,7 @@ public final CompletableFuture updateActiveFilter * Self-managed connectors (connector clients) do not use this field. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id">Documentation * on elastic.co */ @@ -820,7 +820,7 @@ public CompletableFuture updateApiKeyId(UpdateApiKeyIdRe * a function that initializes a builder to create the * {@link UpdateApiKeyIdRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id">Documentation * on elastic.co */ @@ -837,7 +837,7 @@ public final CompletableFuture updateApiKeyId( * Update the configuration field in the connector document. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration">Documentation * on elastic.co */ @@ -857,7 +857,7 @@ public CompletableFuture updateConfiguration(Update * a function that initializes a builder to create the * {@link UpdateConfigurationRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration">Documentation * on elastic.co */ @@ -876,7 +876,7 @@ public final CompletableFuture updateConfiguration( * the error is reset to null, the connector status is updated to connected. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error">Documentation * on elastic.co */ @@ -898,7 +898,7 @@ public CompletableFuture updateError(UpdateErrorRequest req * a function that initializes a builder to create the * {@link UpdateErrorRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error">Documentation * on elastic.co */ @@ -928,7 +928,7 @@ public final CompletableFuture updateError( * on Elastic Cloud for Elastic managed connectors. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features">Documentation * on elastic.co */ @@ -961,7 +961,7 @@ public CompletableFuture updateFeatures(UpdateFeaturesRe * a function that initializes a builder to create the * {@link UpdateFeaturesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features">Documentation * on elastic.co */ @@ -981,7 +981,7 @@ public final CompletableFuture updateFeatures( * configure sync rules (both basic and advanced) for a connector. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering">Documentation * on elastic.co */ @@ -1004,7 +1004,7 @@ public CompletableFuture updateFiltering(UpdateFilterin * a function that initializes a builder to create the * {@link UpdateFilteringRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering">Documentation * on elastic.co */ @@ -1021,7 +1021,7 @@ public final CompletableFuture updateFiltering( * Update the draft filtering validation info for a connector. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-validation-api.html">Documentation * on elastic.co */ @@ -1042,7 +1042,7 @@ public CompletableFuture updateFilteringValid * a function that initializes a builder to create the * {@link UpdateFilteringValidationRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-validation-api.html">Documentation * on elastic.co */ @@ -1060,7 +1060,7 @@ public final CompletableFuture updateFilterin * where the data ingested by the connector is stored. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name">Documentation * on elastic.co */ @@ -1081,7 +1081,7 @@ public CompletableFuture updateIndexName(UpdateIndexNam * a function that initializes a builder to create the * {@link UpdateIndexNameRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name">Documentation * on elastic.co */ @@ -1096,7 +1096,7 @@ public final CompletableFuture updateIndexName( * Update the connector name and description. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name">Documentation * on elastic.co */ @@ -1114,7 +1114,7 @@ public CompletableFuture updateName(UpdateNameRequest reques * a function that initializes a builder to create the * {@link UpdateNameRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name">Documentation * on elastic.co */ @@ -1129,7 +1129,7 @@ public final CompletableFuture updateName( * Update the connector is_native flag. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-native-api.html">Documentation * on elastic.co */ @@ -1147,7 +1147,7 @@ public CompletableFuture updateNative(UpdateNativeRequest * a function that initializes a builder to create the * {@link UpdateNativeRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-native-api.html">Documentation * on elastic.co */ @@ -1165,7 +1165,7 @@ public final CompletableFuture updateNative( * populated with default settings. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline">Documentation * on elastic.co */ @@ -1186,7 +1186,7 @@ public CompletableFuture updatePipeline(UpdatePipelineRe * a function that initializes a builder to create the * {@link UpdatePipelineRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline">Documentation * on elastic.co */ @@ -1201,7 +1201,7 @@ public final CompletableFuture updatePipeline( * Update the connector scheduling. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling">Documentation * on elastic.co */ @@ -1219,7 +1219,7 @@ public CompletableFuture updateScheduling(UpdateSchedu * a function that initializes a builder to create the * {@link UpdateSchedulingRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling">Documentation * on elastic.co */ @@ -1234,7 +1234,7 @@ public final CompletableFuture updateScheduling( * Update the connector service type. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type">Documentation * on elastic.co */ @@ -1252,7 +1252,7 @@ public CompletableFuture updateServiceType(UpdateServ * a function that initializes a builder to create the * {@link UpdateServiceTypeRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type">Documentation * on elastic.co */ @@ -1267,7 +1267,7 @@ public final CompletableFuture updateServiceType( * Update the connector status. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status">Documentation * on elastic.co */ @@ -1285,7 +1285,7 @@ public CompletableFuture updateStatus(UpdateStatusRequest * a function that initializes a builder to create the * {@link UpdateStatusRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorClient.java index 12cc6d4ca..1b4790874 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorClient.java @@ -74,7 +74,7 @@ public ElasticsearchConnectorClient withTransportOptions(@Nullable TransportOpti * current timestamp. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in">Documentation * on elastic.co */ @@ -95,7 +95,7 @@ public CheckInResponse checkIn(CheckInRequest request) throws IOException, Elast * a function that initializes a builder to create the * {@link CheckInRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in">Documentation * on elastic.co */ @@ -115,7 +115,7 @@ public final CheckInResponse checkIn(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete">Documentation * on elastic.co */ @@ -138,7 +138,7 @@ public DeleteConnectorResponse delete(DeleteConnectorRequest request) throws IOE * a function that initializes a builder to create the * {@link DeleteConnectorRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete">Documentation * on elastic.co */ @@ -156,7 +156,7 @@ public final DeleteConnectorResponse delete( * Get the details about a connector. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get">Documentation * on elastic.co */ @@ -176,7 +176,7 @@ public GetConnectorResponse get(GetConnectorRequest request) throws IOException, * a function that initializes a builder to create the * {@link GetConnectorRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get">Documentation * on elastic.co */ @@ -193,7 +193,7 @@ public final GetConnectorResponse get(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list">Documentation * on elastic.co */ @@ -213,7 +213,7 @@ public ListResponse list(ListRequest request) throws IOException, ElasticsearchE * a function that initializes a builder to create the * {@link ListRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list">Documentation * on elastic.co */ @@ -228,7 +228,7 @@ public final ListResponse list(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list">Documentation * on elastic.co */ @@ -249,7 +249,7 @@ public ListResponse list() throws IOException, ElasticsearchException { * self-managed on your infrastructure. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put">Documentation * on elastic.co */ @@ -273,7 +273,7 @@ public PostResponse post(PostRequest request) throws IOException, ElasticsearchE * a function that initializes a builder to create the * {@link PostRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put">Documentation * on elastic.co */ @@ -292,7 +292,7 @@ public final PostResponse post(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put">Documentation * on elastic.co */ @@ -307,7 +307,7 @@ public PostResponse post() throws IOException, ElasticsearchException { * Create or update a connector. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put">Documentation * on elastic.co */ @@ -325,7 +325,7 @@ public PutResponse put(PutRequest request) throws IOException, ElasticsearchExce * a function that initializes a builder to create the * {@link PutRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put">Documentation * on elastic.co */ @@ -338,7 +338,7 @@ public final PutResponse put(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put">Documentation * on elastic.co */ @@ -358,7 +358,7 @@ public PutResponse put() throws IOException, ElasticsearchException { * cancelled. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel">Documentation * on elastic.co */ @@ -382,7 +382,7 @@ public SyncJobCancelResponse syncJobCancel(SyncJobCancelRequest request) * a function that initializes a builder to create the * {@link SyncJobCancelRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel">Documentation * on elastic.co */ @@ -404,7 +404,7 @@ public final SyncJobCancelResponse syncJobCancel( * on Elastic Cloud for Elastic managed connectors. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in">Documentation * on elastic.co */ @@ -429,7 +429,7 @@ public SyncJobCheckInResponse syncJobCheckIn(SyncJobCheckInRequest request) * a function that initializes a builder to create the * {@link SyncJobCheckInRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in">Documentation * on elastic.co */ @@ -456,7 +456,7 @@ public final SyncJobCheckInResponse syncJobCheckIn( * on Elastic Cloud for Elastic managed connectors. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim">Documentation * on elastic.co */ @@ -485,7 +485,7 @@ public SyncJobClaimResponse syncJobClaim(SyncJobClaimRequest request) throws IOE * a function that initializes a builder to create the * {@link SyncJobClaimRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim">Documentation * on elastic.co */ @@ -504,7 +504,7 @@ public final SyncJobClaimResponse syncJobClaim( * action that is not recoverable. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete">Documentation * on elastic.co */ @@ -526,7 +526,7 @@ public SyncJobDeleteResponse syncJobDelete(SyncJobDeleteRequest request) * a function that initializes a builder to create the * {@link SyncJobDeleteRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete">Documentation * on elastic.co */ @@ -547,7 +547,7 @@ public final SyncJobDeleteResponse syncJobDelete( * on Elastic Cloud for Elastic managed connectors. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error">Documentation * on elastic.co */ @@ -570,7 +570,7 @@ public SyncJobErrorResponse syncJobError(SyncJobErrorRequest request) throws IOE * a function that initializes a builder to create the * {@link SyncJobErrorRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error">Documentation * on elastic.co */ @@ -586,7 +586,7 @@ public final SyncJobErrorResponse syncJobError( * Get a connector sync job. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get">Documentation * on elastic.co */ @@ -604,7 +604,7 @@ public SyncJobGetResponse syncJobGet(SyncJobGetRequest request) throws IOExcepti * a function that initializes a builder to create the * {@link SyncJobGetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get">Documentation * on elastic.co */ @@ -622,7 +622,7 @@ public final SyncJobGetResponse syncJobGet(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list">Documentation * on elastic.co */ @@ -643,7 +643,7 @@ public SyncJobListResponse syncJobList(SyncJobListRequest request) throws IOExce * a function that initializes a builder to create the * {@link SyncJobListRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list">Documentation * on elastic.co */ @@ -660,7 +660,7 @@ public final SyncJobListResponse syncJobList( * date in ascending order. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list">Documentation * on elastic.co */ @@ -678,7 +678,7 @@ public SyncJobListResponse syncJobList() throws IOException, ElasticsearchExcept * counters and timestamps with default values. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post">Documentation * on elastic.co */ @@ -699,7 +699,7 @@ public SyncJobPostResponse syncJobPost(SyncJobPostRequest request) throws IOExce * a function that initializes a builder to create the * {@link SyncJobPostRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post">Documentation * on elastic.co */ @@ -723,7 +723,7 @@ public final SyncJobPostResponse syncJobPost( * on Elastic Cloud for Elastic managed connectors. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats">Documentation * on elastic.co */ @@ -750,7 +750,7 @@ public SyncJobUpdateStatsResponse syncJobUpdateStats(SyncJobUpdateStatsRequest r * a function that initializes a builder to create the * {@link SyncJobUpdateStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats">Documentation * on elastic.co */ @@ -768,7 +768,7 @@ public final SyncJobUpdateStatsResponse syncJobUpdateStats( * Activates the valid draft filtering for a connector. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering">Documentation * on elastic.co */ @@ -789,7 +789,7 @@ public UpdateActiveFilteringResponse updateActiveFiltering(UpdateActiveFiltering * a function that initializes a builder to create the * {@link UpdateActiveFilteringRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering">Documentation * on elastic.co */ @@ -811,7 +811,7 @@ public final UpdateActiveFilteringResponse updateActiveFiltering( * Self-managed connectors (connector clients) do not use this field. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id">Documentation * on elastic.co */ @@ -836,7 +836,7 @@ public UpdateApiKeyIdResponse updateApiKeyId(UpdateApiKeyIdRequest request) * a function that initializes a builder to create the * {@link UpdateApiKeyIdRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id">Documentation * on elastic.co */ @@ -854,7 +854,7 @@ public final UpdateApiKeyIdResponse updateApiKeyId( * Update the configuration field in the connector document. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration">Documentation * on elastic.co */ @@ -875,7 +875,7 @@ public UpdateConfigurationResponse updateConfiguration(UpdateConfigurationReques * a function that initializes a builder to create the * {@link UpdateConfigurationRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration">Documentation * on elastic.co */ @@ -895,7 +895,7 @@ public final UpdateConfigurationResponse updateConfiguration( * the error is reset to null, the connector status is updated to connected. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error">Documentation * on elastic.co */ @@ -917,7 +917,7 @@ public UpdateErrorResponse updateError(UpdateErrorRequest request) throws IOExce * a function that initializes a builder to create the * {@link UpdateErrorRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error">Documentation * on elastic.co */ @@ -948,7 +948,7 @@ public final UpdateErrorResponse updateError( * on Elastic Cloud for Elastic managed connectors. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features">Documentation * on elastic.co */ @@ -982,7 +982,7 @@ public UpdateFeaturesResponse updateFeatures(UpdateFeaturesRequest request) * a function that initializes a builder to create the * {@link UpdateFeaturesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features">Documentation * on elastic.co */ @@ -1003,7 +1003,7 @@ public final UpdateFeaturesResponse updateFeatures( * configure sync rules (both basic and advanced) for a connector. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering">Documentation * on elastic.co */ @@ -1027,7 +1027,7 @@ public UpdateFilteringResponse updateFiltering(UpdateFilteringRequest request) * a function that initializes a builder to create the * {@link UpdateFilteringRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering">Documentation * on elastic.co */ @@ -1045,7 +1045,7 @@ public final UpdateFilteringResponse updateFiltering( * Update the draft filtering validation info for a connector. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-validation-api.html">Documentation * on elastic.co */ @@ -1066,7 +1066,7 @@ public UpdateFilteringValidationResponse updateFilteringValidation(UpdateFilteri * a function that initializes a builder to create the * {@link UpdateFilteringValidationRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-validation-api.html">Documentation * on elastic.co */ @@ -1085,7 +1085,7 @@ public final UpdateFilteringValidationResponse updateFilteringValidation( * where the data ingested by the connector is stored. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name">Documentation * on elastic.co */ @@ -1107,7 +1107,7 @@ public UpdateIndexNameResponse updateIndexName(UpdateIndexNameRequest request) * a function that initializes a builder to create the * {@link UpdateIndexNameRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name">Documentation * on elastic.co */ @@ -1123,7 +1123,7 @@ public final UpdateIndexNameResponse updateIndexName( * Update the connector name and description. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name">Documentation * on elastic.co */ @@ -1141,7 +1141,7 @@ public UpdateNameResponse updateName(UpdateNameRequest request) throws IOExcepti * a function that initializes a builder to create the * {@link UpdateNameRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name">Documentation * on elastic.co */ @@ -1156,7 +1156,7 @@ public final UpdateNameResponse updateName(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-native-api.html">Documentation * on elastic.co */ @@ -1174,7 +1174,7 @@ public UpdateNativeResponse updateNative(UpdateNativeRequest request) throws IOE * a function that initializes a builder to create the * {@link UpdateNativeRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-native-api.html">Documentation * on elastic.co */ @@ -1193,7 +1193,7 @@ public final UpdateNativeResponse updateNative( * populated with default settings. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline">Documentation * on elastic.co */ @@ -1215,7 +1215,7 @@ public UpdatePipelineResponse updatePipeline(UpdatePipelineRequest request) * a function that initializes a builder to create the * {@link UpdatePipelineRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline">Documentation * on elastic.co */ @@ -1231,7 +1231,7 @@ public final UpdatePipelineResponse updatePipeline( * Update the connector scheduling. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling">Documentation * on elastic.co */ @@ -1250,7 +1250,7 @@ public UpdateSchedulingResponse updateScheduling(UpdateSchedulingRequest request * a function that initializes a builder to create the * {@link UpdateSchedulingRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling">Documentation * on elastic.co */ @@ -1266,7 +1266,7 @@ public final UpdateSchedulingResponse updateScheduling( * Update the connector service type. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type">Documentation * on elastic.co */ @@ -1285,7 +1285,7 @@ public UpdateServiceTypeResponse updateServiceType(UpdateServiceTypeRequest requ * a function that initializes a builder to create the * {@link UpdateServiceTypeRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type">Documentation * on elastic.co */ @@ -1301,7 +1301,7 @@ public final UpdateServiceTypeResponse updateServiceType( * Update the connector status. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status">Documentation * on elastic.co */ @@ -1319,7 +1319,7 @@ public UpdateStatusResponse updateStatus(UpdateStatusRequest request) throws IOE * a function that initializes a builder to create the * {@link UpdateStatusRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/GetConnectorRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/GetConnectorRequest.java index 400a33927..bd4a9a878 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/GetConnectorRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/GetConnectorRequest.java @@ -30,8 +30,8 @@ import co.elastic.clients.util.ApiTypeHelper; import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; import java.lang.String; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -67,11 +67,15 @@ public class GetConnectorRequest extends RequestBase { private final String connectorId; + @Nullable + private final Boolean includeDeleted; + // --------------------------------------------------------------------------------------------- private GetConnectorRequest(Builder builder) { this.connectorId = ApiTypeHelper.requireNonNull(builder.connectorId, this, "connectorId"); + this.includeDeleted = builder.includeDeleted; } @@ -88,6 +92,17 @@ public final String connectorId() { return this.connectorId; } + /** + * A flag to indicate if the desired connector should be fetched, even if it was + * soft-deleted. + *

+ * API name: {@code include_deleted} + */ + @Nullable + public final Boolean includeDeleted() { + return this.includeDeleted; + } + // --------------------------------------------------------------------------------------------- /** @@ -99,6 +114,9 @@ public static class Builder extends RequestBase.AbstractBuilder ObjectBuilder { private String connectorId; + @Nullable + private Boolean includeDeleted; + /** * Required - The unique identifier of the connector *

@@ -109,6 +127,17 @@ public final Builder connectorId(String value) { return this; } + /** + * A flag to indicate if the desired connector should be fetched, even if it was + * soft-deleted. + *

+ * API name: {@code include_deleted} + */ + public final Builder includeDeleted(@Nullable Boolean value) { + this.includeDeleted = value; + return this; + } + @Override protected Builder self() { return this; @@ -177,7 +206,11 @@ public GetConnectorRequest build() { // Request parameters request -> { - return Collections.emptyMap(); + Map params = new HashMap<>(); + if (request.includeDeleted != null) { + params.put("include_deleted", String.valueOf(request.includeDeleted)); + } + return params; }, SimpleEndpoint.emptyMap(), false, GetConnectorResponse._DESERIALIZER); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ListRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ListRequest.java index 439c4094d..9e1d618d9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ListRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ListRequest.java @@ -30,6 +30,7 @@ import co.elastic.clients.util.ApiTypeHelper; import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; import java.lang.Integer; import java.lang.String; import java.util.Collections; @@ -73,6 +74,9 @@ public class ListRequest extends RequestBase { @Nullable private final Integer from; + @Nullable + private final Boolean includeDeleted; + private final List indexName; @Nullable @@ -89,6 +93,7 @@ private ListRequest(Builder builder) { this.connectorName = ApiTypeHelper.unmodifiable(builder.connectorName); this.from = builder.from; + this.includeDeleted = builder.includeDeleted; this.indexName = ApiTypeHelper.unmodifiable(builder.indexName); this.query = builder.query; this.serviceType = ApiTypeHelper.unmodifiable(builder.serviceType); @@ -119,6 +124,17 @@ public final Integer from() { return this.from; } + /** + * A flag to indicate if the desired connector should be fetched, even if it was + * soft-deleted. + *

+ * API name: {@code include_deleted} + */ + @Nullable + public final Boolean includeDeleted() { + return this.includeDeleted; + } + /** * A comma-separated list of connector index names to fetch connector documents * for @@ -173,6 +189,9 @@ public static class Builder extends RequestBase.AbstractBuilder impleme @Nullable private Integer from; + @Nullable + private Boolean includeDeleted; + @Nullable private List indexName; @@ -219,6 +238,17 @@ public final Builder from(@Nullable Integer value) { return this; } + /** + * A flag to indicate if the desired connector should be fetched, even if it was + * soft-deleted. + *

+ * API name: {@code include_deleted} + */ + public final Builder includeDeleted(@Nullable Boolean value) { + this.includeDeleted = value; + return this; + } + /** * A comma-separated list of connector index names to fetch connector documents * for @@ -351,6 +381,9 @@ public ListRequest build() { if (request.from != null) { params.put("from", String.valueOf(request.from)); } + if (request.includeDeleted != null) { + params.put("include_deleted", String.valueOf(request.includeDeleted)); + } if (ApiTypeHelper.isDefined(request.connectorName)) { params.put("connector_name", request.connectorName.stream().map(v -> v).collect(Collectors.joining(","))); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkRequest.java index b81e9ab31..3abb851a4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkRequest.java @@ -67,9 +67,166 @@ // typedef: _global.bulk.Request /** - * Bulk index or delete documents. Performs multiple indexing or delete - * operations in a single API call. This reduces overhead and can greatly - * increase indexing speed. + * Bulk index or delete documents. Perform multiple index, + * create, delete, and update actions in + * a single request. This reduces overhead and can greatly increase indexing + * speed. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To use the create action, you must have the + * create_doc, create, index, or + * write index privilege. Data streams support only the + * create action.
  • + *
  • To use the index action, you must have the + * create, index, or write index + * privilege.
  • + *
  • To use the delete action, you must have the + * delete or write index privilege.
  • + *
  • To use the update action, you must have the + * index or write index privilege.
  • + *
  • To automatically create a data stream or index with a bulk API request, + * you must have the auto_configure, create_index, or + * manage index privilege.
  • + *
  • To make the result of a bulk operation visible to search using the + * refresh parameter, you must have the maintenance or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * The actions are specified in the request body using a newline delimited JSON + * (NDJSON) structure: + * + *

+ * action_and_meta_data\n
+ * optional_source\n
+ * action_and_meta_data\n
+ * optional_source\n
+ * ....
+ * action_and_meta_data\n
+ * optional_source\n
+ * 
+ * 
+ *

+ * The index and create actions expect a source on the + * next line and have the same semantics as the op_type parameter + * in the standard index API. A create action fails if a document + * with the same ID already exists in the target An index action + * adds or replaces a document as necessary. + *

+ * NOTE: Data streams support only the create action. To update or + * delete a document in a data stream, you must target the backing index + * containing the document. + *

+ * An update action expects that the partial doc, upsert, and + * script and its options are specified on the next line. + *

+ * A delete action does not expect a source on the next line and + * has the same semantics as the standard delete API. + *

+ * NOTE: The final line of data must end with a newline character + * (\n). Each newline character may be preceded by a carriage + * return (\r). When sending NDJSON data to the _bulk + * endpoint, use a Content-Type header of + * application/json or application/x-ndjson. Because + * this format uses literal newline characters (\n) as delimiters, + * make sure that the JSON actions and sources are not pretty printed. + *

+ * If you provide a target in the request path, it is used for any actions that + * don't explicitly specify an _index argument. + *

+ * A note on the format: the idea here is to make processing as fast as + * possible. As some of the actions are redirected to other shards on other + * nodes, only action_meta_data is parsed on the receiving node + * side. + *

+ * Client libraries using this protocol should try and strive to do something + * similar on the client side, and reduce buffering as much as possible. + *

+ * There is no "correct" number of actions to perform in a single bulk + * request. Experiment with different settings to find the optimal size for your + * particular workload. Note that Elasticsearch limits the maximum size of a + * HTTP request to 100mb by default so clients must ensure that no request + * exceeds this size. It is not possible to index a single document that exceeds + * the size limit, so you must pre-process any such documents into smaller + * pieces before sending them to Elasticsearch. For instance, split documents + * into pages or chapters before indexing them, or store raw binary data in a + * system outside Elasticsearch and replace the raw data with a link to the + * external system in the documents that you send to Elasticsearch. + *

+ * Client suppport for bulk requests + *

+ * Some of the officially supported clients provide helpers to assist with bulk + * requests and reindexing: + *

    + *
  • Go: Check out esutil.BulkIndexer
  • + *
  • Perl: Check out Search::Elasticsearch::Client::5_0::Bulk and + * Search::Elasticsearch::Client::5_0::Scroll
  • + *
  • Python: Check out elasticsearch.helpers.*
  • + *
  • JavaScript: Check out client.helpers.*
  • + *
  • .NET: Check out BulkAllObservable
  • + *
  • PHP: Check out bulk indexing.
  • + *
+ *

+ * Submitting bulk requests with cURL + *

+ * If you're providing text file input to curl, you must use the + * --data-binary flag instead of plain -d. The latter + * doesn't preserve newlines. For example: + * + *

+ * $ cat requests
+ * { "index" : { "_index" : "test", "_id" : "1" } }
+ * { "field1" : "value1" }
+ * $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
+ * {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+ * 
+ * 
+ *

+ * Optimistic concurrency control + *

+ * Each index and delete action within a bulk API call + * may include the if_seq_no and if_primary_term + * parameters in their respective action and meta data lines. The + * if_seq_no and if_primary_term parameters control + * how operations are run, based on the last modification to existing documents. + * See Optimistic concurrency control for more details. + *

+ * Versioning + *

+ * Each bulk item can include the version value using the version + * field. It automatically follows the behavior of the index or delete operation + * based on the _version mapping. It also support the + * version_type. + *

+ * Routing + *

+ * Each bulk item can include the routing value using the routing + * field. It automatically follows the behavior of the index or delete operation + * based on the _routing mapping. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Wait for active shards + *

+ * When making bulk calls, you can set the wait_for_active_shards + * parameter to require a minimum number of shard copies to be active before + * starting to process the bulk request. + *

+ * Refresh + *

+ * Control when the changes made by this request are visible to search. + *

+ * NOTE: Only the shards that receive the bulk request will be affected by + * refresh. Imagine a _bulk?refresh=wait_for request with three + * documents in it that happen to be routed to different shards in an index with + * five shards. The request will only wait for those three shards to refresh. + * The other two shards that make up the index do not participate in the + * _bulk request at all. * * @see API * specification @@ -141,8 +298,8 @@ public Iterator _serializables() { return this.operations.iterator(); } /** - * true or false to return the _source - * field or not, or a list of fields to return. + * Indicates whether to return the _source field (true + * or false) or contains a list of fields to return. *

* API name: {@code _source} */ @@ -152,7 +309,10 @@ public final SourceConfigParam source() { } /** - * A comma-separated list of source fields to exclude from the response. + * A comma-separated list of source fields to exclude from the response. You can + * also use this parameter to exclude fields from the subset specified in + * _source_includes query parameter. If the _source + * parameter is false, this parameter is ignored. *

* API name: {@code _source_excludes} */ @@ -161,7 +321,11 @@ public final List sourceExcludes() { } /** - * A comma-separated list of source fields to include in the response. + * A comma-separated list of source fields to include in the response. If this + * parameter is specified, only these source fields are returned. You can + * exclude fields from this subset using the _source_excludes query + * parameter. If the _source parameter is false, this + * parameter is ignored. *

* API name: {@code _source_includes} */ @@ -170,7 +334,8 @@ public final List sourceIncludes() { } /** - * Name of the data stream, index, or index alias to perform bulk actions on. + * The name of the data stream, index, or index alias to perform bulk actions + * on. *

* API name: {@code index} */ @@ -181,7 +346,7 @@ public final String index() { /** * If true, the response will include the ingest pipelines that - * were executed for each index or create. + * were run for each index or create. *

* API name: {@code list_executed_pipelines} */ @@ -191,10 +356,10 @@ public final Boolean listExecutedPipelines() { } /** - * ID of the pipeline to use to preprocess incoming documents. If the index has - * a default ingest pipeline specified, then setting the value to - * _none disables the default ingest pipeline for this request. If - * a final pipeline is configured it will always run, regardless of the value of + * The pipeline identifier to use to preprocess incoming documents. If the index + * has a default ingest pipeline specified, setting the value to + * _none turns off the default ingest pipeline for this request. If + * a final pipeline is configured, it will always run regardless of the value of * this parameter. *

* API name: {@code pipeline} @@ -206,8 +371,8 @@ public final String pipeline() { /** * If true, Elasticsearch refreshes the affected shards to make - * this operation visible to search, if wait_for then wait for a - * refresh to make this operation visible to search, if false do + * this operation visible to search. If wait_for, wait for a + * refresh to make this operation visible to search. If false, do * nothing with refreshes. Valid values: true, false, * wait_for. *

@@ -219,7 +384,7 @@ public final Refresh refresh() { } /** - * If true, the request’s actions must target an index alias. + * If true, the request's actions must target an index alias. *

* API name: {@code require_alias} */ @@ -230,7 +395,7 @@ public final Boolean requireAlias() { /** * If true, the request's actions must target a data stream - * (existing or to-be-created). + * (existing or to be created). *

* API name: {@code require_data_stream} */ @@ -240,7 +405,7 @@ public final Boolean requireDataStream() { } /** - * Custom value used to route operations to a specific shard. + * A custom value that is used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -250,8 +415,11 @@ public final String routing() { } /** - * Period each action waits for the following operations: automatic index - * creation, dynamic mapping updates, waiting for active shards. + * The period each action waits for the following operations: automatic index + * creation, dynamic mapping updates, and waiting for active shards. The default + * is 1m (one minute), which guarantees Elasticsearch waits for at + * least the timeout before failing. The actual wait time could be longer, + * particularly when multiple waits occur. *

* API name: {@code timeout} */ @@ -262,8 +430,9 @@ public final Time timeout() { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total number of - * shards in the index (number_of_replicas+1). + * operation. Set to all or any positive integer up to the total + * number of shards in the index (number_of_replicas+1). The + * default is 1, which waits for each primary shard to be active. *

* API name: {@code wait_for_active_shards} */ @@ -338,8 +507,8 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private List operations; /** - * true or false to return the _source - * field or not, or a list of fields to return. + * Indicates whether to return the _source field (true + * or false) or contains a list of fields to return. *

* API name: {@code _source} */ @@ -349,8 +518,8 @@ public final Builder source(@Nullable SourceConfigParam value) { } /** - * true or false to return the _source - * field or not, or a list of fields to return. + * Indicates whether to return the _source field (true + * or false) or contains a list of fields to return. *

* API name: {@code _source} */ @@ -359,7 +528,10 @@ public final Builder source(Function_source_includes query parameter. If the _source + * parameter is false, this parameter is ignored. *

* API name: {@code _source_excludes} *

@@ -371,7 +543,10 @@ public final Builder sourceExcludes(List list) { } /** - * A comma-separated list of source fields to exclude from the response. + * A comma-separated list of source fields to exclude from the response. You can + * also use this parameter to exclude fields from the subset specified in + * _source_includes query parameter. If the _source + * parameter is false, this parameter is ignored. *

* API name: {@code _source_excludes} *

@@ -383,7 +558,11 @@ public final Builder sourceExcludes(String value, String... values) { } /** - * A comma-separated list of source fields to include in the response. + * A comma-separated list of source fields to include in the response. If this + * parameter is specified, only these source fields are returned. You can + * exclude fields from this subset using the _source_excludes query + * parameter. If the _source parameter is false, this + * parameter is ignored. *

* API name: {@code _source_includes} *

@@ -395,7 +574,11 @@ public final Builder sourceIncludes(List list) { } /** - * A comma-separated list of source fields to include in the response. + * A comma-separated list of source fields to include in the response. If this + * parameter is specified, only these source fields are returned. You can + * exclude fields from this subset using the _source_excludes query + * parameter. If the _source parameter is false, this + * parameter is ignored. *

* API name: {@code _source_includes} *

@@ -407,7 +590,8 @@ public final Builder sourceIncludes(String value, String... values) { } /** - * Name of the data stream, index, or index alias to perform bulk actions on. + * The name of the data stream, index, or index alias to perform bulk actions + * on. *

* API name: {@code index} */ @@ -418,7 +602,7 @@ public final Builder index(@Nullable String value) { /** * If true, the response will include the ingest pipelines that - * were executed for each index or create. + * were run for each index or create. *

* API name: {@code list_executed_pipelines} */ @@ -428,10 +612,10 @@ public final Builder listExecutedPipelines(@Nullable Boolean value) { } /** - * ID of the pipeline to use to preprocess incoming documents. If the index has - * a default ingest pipeline specified, then setting the value to - * _none disables the default ingest pipeline for this request. If - * a final pipeline is configured it will always run, regardless of the value of + * The pipeline identifier to use to preprocess incoming documents. If the index + * has a default ingest pipeline specified, setting the value to + * _none turns off the default ingest pipeline for this request. If + * a final pipeline is configured, it will always run regardless of the value of * this parameter. *

* API name: {@code pipeline} @@ -443,8 +627,8 @@ public final Builder pipeline(@Nullable String value) { /** * If true, Elasticsearch refreshes the affected shards to make - * this operation visible to search, if wait_for then wait for a - * refresh to make this operation visible to search, if false do + * this operation visible to search. If wait_for, wait for a + * refresh to make this operation visible to search. If false, do * nothing with refreshes. Valid values: true, false, * wait_for. *

@@ -456,7 +640,7 @@ public final Builder refresh(@Nullable Refresh value) { } /** - * If true, the request’s actions must target an index alias. + * If true, the request's actions must target an index alias. *

* API name: {@code require_alias} */ @@ -467,7 +651,7 @@ public final Builder requireAlias(@Nullable Boolean value) { /** * If true, the request's actions must target a data stream - * (existing or to-be-created). + * (existing or to be created). *

* API name: {@code require_data_stream} */ @@ -477,7 +661,7 @@ public final Builder requireDataStream(@Nullable Boolean value) { } /** - * Custom value used to route operations to a specific shard. + * A custom value that is used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -487,8 +671,11 @@ public final Builder routing(@Nullable String value) { } /** - * Period each action waits for the following operations: automatic index - * creation, dynamic mapping updates, waiting for active shards. + * The period each action waits for the following operations: automatic index + * creation, dynamic mapping updates, and waiting for active shards. The default + * is 1m (one minute), which guarantees Elasticsearch waits for at + * least the timeout before failing. The actual wait time could be longer, + * particularly when multiple waits occur. *

* API name: {@code timeout} */ @@ -498,8 +685,11 @@ public final Builder timeout(@Nullable Time value) { } /** - * Period each action waits for the following operations: automatic index - * creation, dynamic mapping updates, waiting for active shards. + * The period each action waits for the following operations: automatic index + * creation, dynamic mapping updates, and waiting for active shards. The default + * is 1m (one minute), which guarantees Elasticsearch waits for at + * least the timeout before failing. The actual wait time could be longer, + * particularly when multiple waits occur. *

* API name: {@code timeout} */ @@ -509,8 +699,9 @@ public final Builder timeout(Function> fn) { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total number of - * shards in the index (number_of_replicas+1). + * operation. Set to all or any positive integer up to the total + * number of shards in the index (number_of_replicas+1). The + * default is 1, which waits for each primary shard to be active. *

* API name: {@code wait_for_active_shards} */ @@ -521,8 +712,9 @@ public final Builder waitForActiveShards(@Nullable WaitForActiveShards value) { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total number of - * shards in the index (number_of_replicas+1). + * operation. Set to all or any positive integer up to the total + * number of shards in the index (number_of_replicas+1). The + * default is 1, which waits for each primary shard to be active. *

* API name: {@code wait_for_active_shards} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkResponse.java index 006d6e21b..99d425630 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkResponse.java @@ -87,21 +87,30 @@ public static BulkResponse of(Function> fn) } /** - * Required - API name: {@code errors} + * Required - If true, one or more of the operations in the bulk + * request did not complete successfully. + *

+ * API name: {@code errors} */ public final boolean errors() { return this.errors; } /** - * Required - API name: {@code items} + * Required - The result of each operation in the bulk request, in the order + * they were submitted. + *

+ * API name: {@code items} */ public final List items() { return this.items; } /** - * Required - API name: {@code took} + * Required - The length of time, in milliseconds, it took to process the bulk + * request. + *

+ * API name: {@code took} */ public final long took() { return this.took; @@ -172,7 +181,10 @@ public static class Builder extends WithJsonObjectBuilderBase implement private Long ingestTook; /** - * Required - API name: {@code errors} + * Required - If true, one or more of the operations in the bulk + * request did not complete successfully. + *

+ * API name: {@code errors} */ public final Builder errors(boolean value) { this.errors = value; @@ -180,7 +192,10 @@ public final Builder errors(boolean value) { } /** - * Required - API name: {@code items} + * Required - The result of each operation in the bulk request, in the order + * they were submitted. + *

+ * API name: {@code items} *

* Adds all elements of list to items. */ @@ -190,7 +205,10 @@ public final Builder items(List list) { } /** - * Required - API name: {@code items} + * Required - The result of each operation in the bulk request, in the order + * they were submitted. + *

+ * API name: {@code items} *

* Adds one or more values to items. */ @@ -200,7 +218,10 @@ public final Builder items(BulkResponseItem value, BulkResponseItem... values) { } /** - * Required - API name: {@code items} + * Required - The result of each operation in the bulk request, in the order + * they were submitted. + *

+ * API name: {@code items} *

* Adds a value to items using a builder lambda. */ @@ -209,7 +230,10 @@ public final Builder items(Function + * API name: {@code took} */ public final Builder took(long value) { this.took = value; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClearScrollRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClearScrollRequest.java index b5dde92f7..3157ed473 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClearScrollRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClearScrollRequest.java @@ -57,9 +57,8 @@ // typedef: _global.clear_scroll.Request /** - * Clear a scrolling search. - *

- * Clear the search context and results for a scrolling search. + * Clear a scrolling search. Clear the search context and results for a + * scrolling search. * * @see API * specification @@ -81,7 +80,7 @@ public static ClearScrollRequest of(Function_all. + * The scroll IDs to clear. To clear all scroll IDs, use _all. *

* API name: {@code scroll_id} */ @@ -126,7 +125,7 @@ public static class Builder extends RequestBase.AbstractBuilder private List scrollId; /** - * Scroll IDs to clear. To clear all scroll IDs, use _all. + * The scroll IDs to clear. To clear all scroll IDs, use _all. *

* API name: {@code scroll_id} *

@@ -138,7 +137,7 @@ public final Builder scrollId(List list) { } /** - * Scroll IDs to clear. To clear all scroll IDs, use _all. + * The scroll IDs to clear. To clear all scroll IDs, use _all. *

* API name: {@code scroll_id} *

diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClearScrollResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClearScrollResponse.java index f8f7ac60f..ff99d78af 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClearScrollResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClearScrollResponse.java @@ -77,14 +77,19 @@ public static ClearScrollResponse of(Functiontrue, the request succeeded. This does not + * indicate whether any scrolling search requests were cleared. + *

+ * API name: {@code succeeded} */ public final boolean succeeded() { return this.succeeded; } /** - * Required - API name: {@code num_freed} + * Required - The number of scrolling search requests cleared. + *

+ * API name: {@code num_freed} */ public final int numFreed() { return this.numFreed; @@ -128,7 +133,10 @@ public static class Builder extends WithJsonObjectBuilderBase private Integer numFreed; /** - * Required - API name: {@code succeeded} + * Required - If true, the request succeeded. This does not + * indicate whether any scrolling search requests were cleared. + *

+ * API name: {@code succeeded} */ public final Builder succeeded(boolean value) { this.succeeded = value; @@ -136,7 +144,9 @@ public final Builder succeeded(boolean value) { } /** - * Required - API name: {@code num_freed} + * Required - The number of scrolling search requests cleared. + *

+ * API name: {@code num_freed} */ public final Builder numFreed(int value) { this.numFreed = value; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClosePointInTimeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClosePointInTimeRequest.java index 733803252..0bd98486b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClosePointInTimeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClosePointInTimeRequest.java @@ -56,14 +56,12 @@ // typedef: _global.close_point_in_time.Request /** - * Close a point in time. - *

- * A point in time must be opened explicitly before being used in search - * requests. The keep_alive parameter tells Elasticsearch how long - * it should persist. A point in time is automatically closed when the - * keep_alive period has elapsed. However, keeping points in time - * has a cost; close them as soon as they are no longer required for search - * requests. + * Close a point in time. A point in time must be opened explicitly before being + * used in search requests. The keep_alive parameter tells + * Elasticsearch how long it should persist. A point in time is automatically + * closed when the keep_alive period has elapsed. However, keeping + * points in time has a cost; close them as soon as they are no longer required + * for search requests. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClosePointInTimeResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClosePointInTimeResponse.java index 4cdc7fbfa..f65487fcb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClosePointInTimeResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ClosePointInTimeResponse.java @@ -78,14 +78,19 @@ public static ClosePointInTimeResponse of(Functiontrue, all search contexts associated with the + * point-in-time ID were successfully closed. + *

+ * API name: {@code succeeded} */ public final boolean succeeded() { return this.succeeded; } /** - * Required - API name: {@code num_freed} + * Required - The number of search contexts that were successfully closed. + *

+ * API name: {@code num_freed} */ public final int numFreed() { return this.numFreed; @@ -129,7 +134,10 @@ public static class Builder extends WithJsonObjectBuilderBase private Integer numFreed; /** - * Required - API name: {@code succeeded} + * Required - If true, all search contexts associated with the + * point-in-time ID were successfully closed. + *

+ * API name: {@code succeeded} */ public final Builder succeeded(boolean value) { this.succeeded = value; @@ -137,7 +145,9 @@ public final Builder succeeded(boolean value) { } /** - * Required - API name: {@code num_freed} + * Required - The number of search contexts that were successfully closed. + *

+ * API name: {@code num_freed} */ public final Builder numFreed(int value) { this.numFreed = value; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/CountRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/CountRequest.java index 82cd9a5dd..06a711758 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/CountRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/CountRequest.java @@ -66,6 +66,18 @@ /** * Count search results. Get the number of documents matching a query. + *

+ * The query can be provided either by using a simple query string as a + * parameter, or by defining Query DSL within the request body. The query is + * optional. When no query is provided, the API uses match_all to + * count all the documents. + *

+ * The count API supports multi-target syntax. You can run a single count API + * search across multiple data streams and indices. + *

+ * The operation is broadcast across all shards. For each shard ID group, a + * replica is chosen and the search is run against it. This means that replicas + * increase the scalability of the count. * * @see API * specification @@ -149,7 +161,9 @@ public static CountRequest of(Function> fn) * If false, the request returns an error if any wildcard * expression, index alias, or _all value targets only missing or * closed indices. This behavior applies even if the request targets other open - * indices. + * indices. For example, a request targeting foo*,bar* returns an + * error if an index starts with foo but no index starts with + * bar. *

* API name: {@code allow_no_indices} */ @@ -160,7 +174,7 @@ public final Boolean allowNoIndices() { /** * If true, wildcard and prefix queries are analyzed. This - * parameter can only be used when the q query string parameter is + * parameter can be used only when the q query string parameter is * specified. *

* API name: {@code analyze_wildcard} @@ -171,8 +185,8 @@ public final Boolean analyzeWildcard() { } /** - * Analyzer to use for the query string. This parameter can only be used when - * the q query string parameter is specified. + * The analyzer to use for the query string. This parameter can be used only + * when the q query string parameter is specified. *

* API name: {@code analyzer} */ @@ -183,7 +197,7 @@ public final String analyzer() { /** * The default operator for query string query: AND or - * OR. This parameter can only be used when the q + * OR. This parameter can be used only when the q * query string parameter is specified. *

* API name: {@code default_operator} @@ -194,8 +208,8 @@ public final Operator defaultOperator() { } /** - * Field to use as default where no field prefix is given in the query string. - * This parameter can only be used when the q query string + * The field to use as a default when no field prefix is given in the query + * string. This parameter can be used only when the q query string * parameter is specified. *

* API name: {@code df} @@ -206,9 +220,9 @@ public final String df() { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as + * hidden data streams. It supports comma-separated values, such as * open,hidden. *

* API name: {@code expand_wildcards} @@ -218,11 +232,14 @@ public final List expandWildcards() { } /** - * If true, concrete, expanded or aliased indices are ignored when + * If true, concrete, expanded, or aliased indices are ignored when * frozen. *

* API name: {@code ignore_throttled} + * + * @deprecated 7.16.0 */ + @Deprecated @Nullable public final Boolean ignoreThrottled() { return this.ignoreThrottled; @@ -240,8 +257,8 @@ public final Boolean ignoreUnavailable() { } /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (*). To search all data streams and indices, + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). To search all data streams and indices, * omit this parameter or use * or _all. *

* API name: {@code index} @@ -252,7 +269,8 @@ public final List index() { /** * If true, format-based query failures (such as providing text to - * a numeric field) in the query string will be ignored. + * a numeric field) in the query string will be ignored. This parameter can be + * used only when the q query string parameter is specified. *

* API name: {@code lenient} */ @@ -262,8 +280,8 @@ public final Boolean lenient() { } /** - * Sets the minimum _score value that documents must have to be - * included in the result. + * The minimum _score value that documents must have to be included + * in the result. *

* API name: {@code min_score} */ @@ -273,8 +291,8 @@ public final Double minScore() { } /** - * Specifies the node or shard the operation should be performed on. Random by - * default. + * The node or shard the operation should be performed on. By default, it is + * random. *

* API name: {@code preference} */ @@ -284,7 +302,8 @@ public final String preference() { } /** - * Query in the Lucene query string syntax. + * The query in Lucene query string syntax. This parameter cannot be used with a + * request body. *

* API name: {@code q} */ @@ -294,7 +313,8 @@ public final String q() { } /** - * Defines the search definition using the Query DSL. + * Defines the search query using Query DSL. A request body query cannot be used + * with the q query string parameter. *

* API name: {@code query} */ @@ -304,7 +324,7 @@ public final Query query() { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -314,10 +334,15 @@ public final String routing() { } /** - * Maximum number of documents to collect for each shard. If a query reaches + * The maximum number of documents to collect for each shard. If a query reaches * this limit, Elasticsearch terminates the query early. Elasticsearch collects * documents before sorting. *

+ * IMPORTANT: Use with caution. Elasticsearch applies this parameter to each + * shard handling the request. When possible, let Elasticsearch perform early + * termination automatically. Avoid specifying this parameter for requests that + * target data streams with backing indices across multiple data tiers. + *

* API name: {@code terminate_after} */ @Nullable @@ -403,7 +428,9 @@ public static class Builder extends RequestBase.AbstractBuilder impleme * If false, the request returns an error if any wildcard * expression, index alias, or _all value targets only missing or * closed indices. This behavior applies even if the request targets other open - * indices. + * indices. For example, a request targeting foo*,bar* returns an + * error if an index starts with foo but no index starts with + * bar. *

* API name: {@code allow_no_indices} */ @@ -414,7 +441,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { /** * If true, wildcard and prefix queries are analyzed. This - * parameter can only be used when the q query string parameter is + * parameter can be used only when the q query string parameter is * specified. *

* API name: {@code analyze_wildcard} @@ -425,8 +452,8 @@ public final Builder analyzeWildcard(@Nullable Boolean value) { } /** - * Analyzer to use for the query string. This parameter can only be used when - * the q query string parameter is specified. + * The analyzer to use for the query string. This parameter can be used only + * when the q query string parameter is specified. *

* API name: {@code analyzer} */ @@ -437,7 +464,7 @@ public final Builder analyzer(@Nullable String value) { /** * The default operator for query string query: AND or - * OR. This parameter can only be used when the q + * OR. This parameter can be used only when the q * query string parameter is specified. *

* API name: {@code default_operator} @@ -448,8 +475,8 @@ public final Builder defaultOperator(@Nullable Operator value) { } /** - * Field to use as default where no field prefix is given in the query string. - * This parameter can only be used when the q query string + * The field to use as a default when no field prefix is given in the query + * string. This parameter can be used only when the q query string * parameter is specified. *

* API name: {@code df} @@ -460,9 +487,9 @@ public final Builder df(@Nullable String value) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as + * hidden data streams. It supports comma-separated values, such as * open,hidden. *

* API name: {@code expand_wildcards} @@ -475,9 +502,9 @@ public final Builder expandWildcards(List list) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as + * hidden data streams. It supports comma-separated values, such as * open,hidden. *

* API name: {@code expand_wildcards} @@ -490,11 +517,14 @@ public final Builder expandWildcards(ExpandWildcard value, ExpandWildcard... val } /** - * If true, concrete, expanded or aliased indices are ignored when + * If true, concrete, expanded, or aliased indices are ignored when * frozen. *

* API name: {@code ignore_throttled} + * + * @deprecated 7.16.0 */ + @Deprecated public final Builder ignoreThrottled(@Nullable Boolean value) { this.ignoreThrottled = value; return this; @@ -512,8 +542,8 @@ public final Builder ignoreUnavailable(@Nullable Boolean value) { } /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (*). To search all data streams and indices, + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). To search all data streams and indices, * omit this parameter or use * or _all. *

* API name: {@code index} @@ -526,8 +556,8 @@ public final Builder index(List list) { } /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (*). To search all data streams and indices, + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). To search all data streams and indices, * omit this parameter or use * or _all. *

* API name: {@code index} @@ -541,7 +571,8 @@ public final Builder index(String value, String... values) { /** * If true, format-based query failures (such as providing text to - * a numeric field) in the query string will be ignored. + * a numeric field) in the query string will be ignored. This parameter can be + * used only when the q query string parameter is specified. *

* API name: {@code lenient} */ @@ -551,8 +582,8 @@ public final Builder lenient(@Nullable Boolean value) { } /** - * Sets the minimum _score value that documents must have to be - * included in the result. + * The minimum _score value that documents must have to be included + * in the result. *

* API name: {@code min_score} */ @@ -562,8 +593,8 @@ public final Builder minScore(@Nullable Double value) { } /** - * Specifies the node or shard the operation should be performed on. Random by - * default. + * The node or shard the operation should be performed on. By default, it is + * random. *

* API name: {@code preference} */ @@ -573,7 +604,8 @@ public final Builder preference(@Nullable String value) { } /** - * Query in the Lucene query string syntax. + * The query in Lucene query string syntax. This parameter cannot be used with a + * request body. *

* API name: {@code q} */ @@ -583,7 +615,8 @@ public final Builder q(@Nullable String value) { } /** - * Defines the search definition using the Query DSL. + * Defines the search query using Query DSL. A request body query cannot be used + * with the q query string parameter. *

* API name: {@code query} */ @@ -593,7 +626,8 @@ public final Builder query(@Nullable Query value) { } /** - * Defines the search definition using the Query DSL. + * Defines the search query using Query DSL. A request body query cannot be used + * with the q query string parameter. *

* API name: {@code query} */ @@ -602,7 +636,7 @@ public final Builder query(Function> fn) { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -612,10 +646,15 @@ public final Builder routing(@Nullable String value) { } /** - * Maximum number of documents to collect for each shard. If a query reaches + * The maximum number of documents to collect for each shard. If a query reaches * this limit, Elasticsearch terminates the query early. Elasticsearch collects * documents before sorting. *

+ * IMPORTANT: Use with caution. Elasticsearch applies this parameter to each + * shard handling the request. When possible, let Elasticsearch perform early + * termination automatically. Avoid specifying this parameter for requests that + * target data streams with backing indices across multiple data tiers. + *

* API name: {@code terminate_after} */ public final Builder terminateAfter(@Nullable Long value) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/CreateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/CreateRequest.java index ec0f3b30f..1455f5be5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/CreateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/CreateRequest.java @@ -67,9 +67,129 @@ // typedef: _global.create.Request /** - * Index a document. Adds a JSON document to the specified data stream or index - * and makes it searchable. If the target is an index and the document already - * exists, the request updates the document and increments its version. + * Create a new document in the index. + *

+ * You can index a new JSON document with the /<target>/_doc/ + * or /<target>/_create/<_id> APIs. Using + * _create guarantees that the document is indexed only if it does + * not already exist. It returns a 409 response when a document with the same ID + * already exists in the index. To update an existing document, you must use the + * /<target>/_doc/ API. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To add a document using the + * PUT /<target>/_create/<_id> or + * POST /<target>/_create/<_id> request formats, you + * must have the create_doc, create, + * index, or write index privilege.
  • To automatically create a data stream or index with this API request, you + * must have the auto_configure, create_index, or + * manage index privilege.
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * Automatically create data streams and indices + *

+ * If the request's target doesn't exist and matches an index template with a + * data_stream definition, the index operation automatically + * creates the data stream. + *

+ * If the target doesn't exist and doesn't match a data stream template, the + * operation automatically creates the index and applies any matching index + * templates. + *

+ * NOTE: Elasticsearch includes several built-in index templates. To avoid + * naming collisions with these templates, refer to index pattern documentation. + *

+ * If no mapping exists, the index operation creates a dynamic mapping. By + * default, new fields and objects are automatically added to the mapping if + * needed. + *

+ * Automatic index creation is controlled by the + * action.auto_create_index setting. If it is true, + * any index can be created automatically. You can modify this setting to + * explicitly allow or block automatic creation of indices that match specified + * patterns or set it to false to turn off automatic index creation + * entirely. Specify a comma-separated list of patterns you want to allow or + * prefix each pattern with + or - to indicate whether + * it should be allowed or blocked. When a list is specified, the default + * behaviour is to disallow. + *

+ * NOTE: The action.auto_create_index setting affects the automatic + * creation of indices only. It does not affect the creation of data streams. + *

+ * Routing + *

+ * By default, shard placement — or routing — is controlled by using a hash of + * the document's ID value. For more explicit control, the value fed into the + * hash function used by the router can be directly specified on a per-operation + * basis using the routing parameter. + *

+ * When setting up explicit mapping, you can also use the _routing + * field to direct the index operation to extract the routing value from the + * document itself. This does come at the (very minimal) cost of an additional + * document parsing pass. If the _routing mapping is defined and + * set to be required, the index operation will fail if no routing value is + * provided or extracted. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Distributed + *

+ * The index operation is directed to the primary shard based on its route and + * performed on the actual node containing this shard. After the primary shard + * completes the operation, if needed, the update is distributed to applicable + * replicas. + *

+ * Active shards + *

+ * To improve the resiliency of writes to the system, indexing operations can be + * configured to wait for a certain number of active shard copies before + * proceeding with the operation. If the requisite number of active shard copies + * are not available, then the write operation must wait and retry, until either + * the requisite shard copies have started or a timeout occurs. By default, + * write operations only wait for the primary shards to be active before + * proceeding (that is to say wait_for_active_shards is + * 1). This default can be overridden in the index settings + * dynamically by setting index.write.wait_for_active_shards. To + * alter this behavior per operation, use the + * wait_for_active_shards request parameter. + *

+ * Valid values are all or any positive integer up to the total number of + * configured copies per shard in the index (which is + * number_of_replicas+1). Specifying a negative value or a number + * greater than the number of shard copies will throw an error. + *

+ * For example, suppose you have a cluster of three nodes, A, B, and C, and you + * create an index index with the number of replicas set to 3 (resulting in 4 + * shard copies, one more copy than there are nodes). If you attempt an indexing + * operation, by default the operation will only ensure the primary copy of each + * shard is available before proceeding. This means that even if B and C went + * down and A hosted the primary shard copies, the indexing operation would + * still proceed with only one copy of the data. If + * wait_for_active_shards is set on the request to 3 + * (and all three nodes are up), the indexing operation will require 3 active + * shard copies before proceeding. This requirement should be met because there + * are 3 active nodes in the cluster, each one holding a copy of the shard. + * However, if you set wait_for_active_shards to all + * (or to 4, which is the same in this situation), the indexing + * operation will not proceed as you do not have all 4 copies of each shard + * active in the index. The operation will time out unless a new node is brought + * up in the cluster to host the fourth copy of the shard.

+ * It is important to note that this setting greatly reduces the chances of the + * write operation not writing to the requisite number of shard copies, but it + * does not completely eliminate the possibility, because this check occurs + * before the write operation starts. After the write operation is underway, it + * is still possible for replication to fail on any number of shard copies but + * still succeed on the primary. The _shards section of the API + * response reveals the number of shard copies on which replication succeeded + * and failed. * * @see API * specification @@ -130,7 +250,8 @@ public static CreateRequest of( } /** - * Required - Unique identifier for the document. + * Required - A unique identifier for the document. To automatically generate a + * document ID, use the POST /<target>/_doc/ request format. *

* API name: {@code id} */ @@ -139,11 +260,11 @@ public final String id() { } /** - * Required - Name of the data stream or index to target. If the target doesn’t - * exist and matches the name or wildcard (*) pattern of an index - * template with a data_stream definition, this request creates the - * data stream. If the target doesn’t exist and doesn’t match a data stream - * template, this request creates the index. + * Required - The name of the data stream or index to target. If the target + * doesn't exist and matches the name or wildcard (*) pattern of an + * index template with a data_stream definition, this request + * creates the data stream. If the target doesn't exist and doesn’t match a data + * stream template, this request creates the index. *

* API name: {@code index} */ @@ -152,10 +273,10 @@ public final String index() { } /** - * ID of the pipeline to use to preprocess incoming documents. If the index has - * a default ingest pipeline specified, then setting the value to - * _none disables the default ingest pipeline for this request. If - * a final pipeline is configured it will always run, regardless of the value of + * The ID of the pipeline to use to preprocess incoming documents. If the index + * has a default ingest pipeline specified, setting the value to + * _none turns off the default ingest pipeline for this request. If + * a final pipeline is configured, it will always run regardless of the value of * this parameter. *

* API name: {@code pipeline} @@ -167,10 +288,9 @@ public final String pipeline() { /** * If true, Elasticsearch refreshes the affected shards to make - * this operation visible to search, if wait_for then wait for a - * refresh to make this operation visible to search, if false do - * nothing with refreshes. Valid values: true, false, - * wait_for. + * this operation visible to search. If wait_for, it waits for a + * refresh to make this operation visible to search. If false, it + * does nothing with refreshes. *

* API name: {@code refresh} */ @@ -180,7 +300,7 @@ public final Refresh refresh() { } /** - * Custom value used to route operations to a specific shard. + * A custom value that is used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -190,8 +310,18 @@ public final String routing() { } /** - * Period the request waits for the following operations: automatic index - * creation, dynamic mapping updates, waiting for active shards. + * The period the request waits for the following operations: automatic index + * creation, dynamic mapping updates, waiting for active shards. Elasticsearch + * waits for at least the specified timeout period before failing. The actual + * wait time could be longer, particularly when multiple waits occur. + *

+ * This parameter is useful for situations where the primary shard assigned to + * perform the operation might not be available when the operation runs. Some + * reasons for this might be that the primary shard is currently recovering from + * a gateway or undergoing relocation. By default, the operation will wait on + * the primary shard to become available for at least 1 minute before failing + * and responding with an error. The actual wait time could be longer, + * particularly when multiple waits occur. *

* API name: {@code timeout} */ @@ -201,8 +331,8 @@ public final Time timeout() { } /** - * Explicit version number for concurrency control. The specified version must - * match the current version of the document for the request to succeed. + * The explicit version number for concurrency control. It must be a + * non-negative long number. *

* API name: {@code version} */ @@ -212,7 +342,7 @@ public final Long version() { } /** - * Specific version type: external, external_gte. + * The version type. *

* API name: {@code version_type} */ @@ -223,8 +353,10 @@ public final VersionType versionType() { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total - * number of shards in the index (number_of_replicas+1). + * operation. You can set it to all or any positive integer up to + * the total number of shards in the index (number_of_replicas+1). + * The default value of 1 means it waits for each primary shard to + * be active. *

* API name: {@code wait_for_active_shards} */ @@ -288,7 +420,8 @@ public static class Builder extends RequestBase.AbstractBuilder tDocumentSerializer; /** - * Required - Unique identifier for the document. + * Required - A unique identifier for the document. To automatically generate a + * document ID, use the POST /<target>/_doc/ request format. *

* API name: {@code id} */ @@ -298,11 +431,11 @@ public final Builder id(String value) { } /** - * Required - Name of the data stream or index to target. If the target doesn’t - * exist and matches the name or wildcard (*) pattern of an index - * template with a data_stream definition, this request creates the - * data stream. If the target doesn’t exist and doesn’t match a data stream - * template, this request creates the index. + * Required - The name of the data stream or index to target. If the target + * doesn't exist and matches the name or wildcard (*) pattern of an + * index template with a data_stream definition, this request + * creates the data stream. If the target doesn't exist and doesn’t match a data + * stream template, this request creates the index. *

* API name: {@code index} */ @@ -312,10 +445,10 @@ public final Builder index(String value) { } /** - * ID of the pipeline to use to preprocess incoming documents. If the index has - * a default ingest pipeline specified, then setting the value to - * _none disables the default ingest pipeline for this request. If - * a final pipeline is configured it will always run, regardless of the value of + * The ID of the pipeline to use to preprocess incoming documents. If the index + * has a default ingest pipeline specified, setting the value to + * _none turns off the default ingest pipeline for this request. If + * a final pipeline is configured, it will always run regardless of the value of * this parameter. *

* API name: {@code pipeline} @@ -327,10 +460,9 @@ public final Builder pipeline(@Nullable String value) { /** * If true, Elasticsearch refreshes the affected shards to make - * this operation visible to search, if wait_for then wait for a - * refresh to make this operation visible to search, if false do - * nothing with refreshes. Valid values: true, false, - * wait_for. + * this operation visible to search. If wait_for, it waits for a + * refresh to make this operation visible to search. If false, it + * does nothing with refreshes. *

* API name: {@code refresh} */ @@ -340,7 +472,7 @@ public final Builder refresh(@Nullable Refresh value) { } /** - * Custom value used to route operations to a specific shard. + * A custom value that is used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -350,8 +482,18 @@ public final Builder routing(@Nullable String value) { } /** - * Period the request waits for the following operations: automatic index - * creation, dynamic mapping updates, waiting for active shards. + * The period the request waits for the following operations: automatic index + * creation, dynamic mapping updates, waiting for active shards. Elasticsearch + * waits for at least the specified timeout period before failing. The actual + * wait time could be longer, particularly when multiple waits occur. + *

+ * This parameter is useful for situations where the primary shard assigned to + * perform the operation might not be available when the operation runs. Some + * reasons for this might be that the primary shard is currently recovering from + * a gateway or undergoing relocation. By default, the operation will wait on + * the primary shard to become available for at least 1 minute before failing + * and responding with an error. The actual wait time could be longer, + * particularly when multiple waits occur. *

* API name: {@code timeout} */ @@ -361,8 +503,18 @@ public final Builder timeout(@Nullable Time value) { } /** - * Period the request waits for the following operations: automatic index - * creation, dynamic mapping updates, waiting for active shards. + * The period the request waits for the following operations: automatic index + * creation, dynamic mapping updates, waiting for active shards. Elasticsearch + * waits for at least the specified timeout period before failing. The actual + * wait time could be longer, particularly when multiple waits occur. + *

+ * This parameter is useful for situations where the primary shard assigned to + * perform the operation might not be available when the operation runs. Some + * reasons for this might be that the primary shard is currently recovering from + * a gateway or undergoing relocation. By default, the operation will wait on + * the primary shard to become available for at least 1 minute before failing + * and responding with an error. The actual wait time could be longer, + * particularly when multiple waits occur. *

* API name: {@code timeout} */ @@ -371,8 +523,8 @@ public final Builder timeout(Function * API name: {@code version} */ @@ -382,7 +534,7 @@ public final Builder version(@Nullable Long value) { } /** - * Specific version type: external, external_gte. + * The version type. *

* API name: {@code version_type} */ @@ -393,8 +545,10 @@ public final Builder versionType(@Nullable VersionType value) { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total - * number of shards in the index (number_of_replicas+1). + * operation. You can set it to all or any positive integer up to + * the total number of shards in the index (number_of_replicas+1). + * The default value of 1 means it waits for each primary shard to + * be active. *

* API name: {@code wait_for_active_shards} */ @@ -405,8 +559,10 @@ public final Builder waitForActiveShards(@Nullable WaitForActiveShard /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total - * number of shards in the index (number_of_replicas+1). + * operation. You can set it to all or any positive integer up to + * the total number of shards in the index (number_of_replicas+1). + * The default value of 1 means it waits for each primary shard to + * be active. *

* API name: {@code wait_for_active_shards} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteByQueryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteByQueryRequest.java index 5b10609a9..9a9260fc6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteByQueryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteByQueryRequest.java @@ -71,7 +71,136 @@ // typedef: _global.delete_by_query.Request /** - * Delete documents. Deletes documents that match the specified query. + * Delete documents. + *

+ * Deletes documents that match the specified query. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or alias: + *

    + *
  • read
  • + *
  • delete or write
  • + *
+ *

+ * You can specify the query criteria in the request URI or the request body + * using the same syntax as the search API. When you submit a delete by query + * request, Elasticsearch gets a snapshot of the data stream or index when it + * begins processing the request and deletes matching documents using internal + * versioning. If a document changes between the time that the snapshot is taken + * and the delete operation is processed, it results in a version conflict and + * the delete operation fails. + *

+ * NOTE: Documents with a version equal to 0 cannot be deleted using delete by + * query because internal versioning does not support 0 as a valid version + * number. + *

+ * While processing a delete by query request, Elasticsearch performs multiple + * search requests sequentially to find all of the matching documents to delete. + * A bulk delete request is performed for each batch of matching documents. If a + * search or bulk request is rejected, the requests are retried up to 10 times, + * with exponential back off. If the maximum retry limit is reached, processing + * halts and all failed requests are returned in the response. Any delete + * requests that completed successfully still stick; they are not rolled back. + *

+ * You can opt to count version conflicts instead of halting and returning by + * setting conflicts to proceed. Note that if you opt + * to count version conflicts the operation could attempt to delete more + * documents from the source than max_docs until it has + * successfully deleted max_docs documents, or it has gone through + * every document in the source query. + *

+ * Throttling delete requests + *

+ * To control the rate at which delete by query issues batches of delete + * operations, you can set requests_per_second to any positive + * decimal number. This pads each batch with a wait time to throttle the rate. + * Set requests_per_second to -1 to disable + * throttling. + *

+ * Throttling uses a wait time between batches so that the internal scroll + * requests can be given a timeout that takes the request padding into account. + * The padding time is the difference between the batch size divided by the + * requests_per_second and the time spent writing. By default the + * batch size is 1000, so if requests_per_second is + * set to 500: + * + *

+ * target_time = 1000 / 500 per second = 2 seconds
+ * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+ * 
+ * 
+ *

+ * Since the batch is issued as a single _bulk request, large batch + * sizes cause Elasticsearch to create many requests and wait before starting + * the next set. This is "bursty" instead of "smooth". + *

+ * Slicing + *

+ * Delete by query supports sliced scroll to parallelize the delete process. + * This can improve efficiency and provide a convenient way to break the request + * down into smaller parts. + *

+ * Setting slices to auto lets Elasticsearch choose + * the number of slices to use. This setting will use one slice per shard, up to + * a certain limit. If there are multiple source data streams or indices, it + * will choose the number of slices based on the index or backing index with the + * smallest number of shards. Adding slices to the delete by query operation + * creates sub-requests which means it has some quirks: + *

    + *
  • You can see these requests in the tasks APIs. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices only contains + * the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each + * sub-request.
  • + *
  • Due to the nature of slices each sub-request won't get a + * perfectly even portion of the documents. All documents will be addressed, but + * some slices may be larger than others. Expect larger slices to have a more + * even distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed + * proportionally to each sub-request. Combine that with the earlier point about + * distribution being uneven and you should conclude that using + * max_docs with slices might not result in exactly + * max_docs documents being deleted.
  • + *
  • Each sub-request gets a slightly different snapshot of the source data + * stream or index though these are all taken at approximately the same + * time.
  • + *
+ *

+ * If you're slicing manually or otherwise tuning automatic slicing, keep in + * mind that: + *

    + *
  • Query performance is most efficient when the number of slices is equal to + * the number of shards in the index or backing index. If that number is large + * (for example, 500), choose a lower number as too many slices + * hurts performance. Setting slices higher than the number of + * shards generally does not improve efficiency and adds overhead.
  • + *
  • Delete performance scales linearly across available resources with the + * number of slices.
  • + *
+ *

+ * Whether query or delete performance dominates the runtime depends on the + * documents being deleted and cluster resources. + *

+ * Cancel a delete by query operation + *

+ * Any delete by query can be canceled using the task cancel API. For example: + * + *

+ * POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
+ * 
+ * 
+ *

+ * The task ID can be found by using the get tasks API. + *

+ * Cancellation should happen quickly but might take a few seconds. The get task + * status API will continue to list the delete by query task until this task + * checks that it has been cancelled and terminates itself. * * @see API * specification @@ -229,7 +358,9 @@ public final Boolean allowNoIndices() { } /** - * If true, wildcard and prefix queries are analyzed. + * If true, wildcard and prefix queries are analyzed. This + * parameter can be used only when the q query string parameter is + * specified. *

* API name: {@code analyze_wildcard} */ @@ -239,7 +370,8 @@ public final Boolean analyzeWildcard() { } /** - * Analyzer to use for the query string. + * Analyzer to use for the query string. This parameter can be used only when + * the q query string parameter is specified. *

* API name: {@code analyzer} */ @@ -261,7 +393,8 @@ public final Conflicts conflicts() { /** * The default operator for query string query: AND or - * OR. + * OR. This parameter can be used only when the q + * query string parameter is specified. *

* API name: {@code default_operator} */ @@ -271,7 +404,9 @@ public final Operator defaultOperator() { } /** - * Field to use as default where no field prefix is given in the query string. + * The field to use as default where no field prefix is given in the query + * string. This parameter can be used only when the q query string + * parameter is specified. *

* API name: {@code df} */ @@ -281,12 +416,10 @@ public final String df() { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * hidden data streams. It supports comma-separated values, such as + * open,hidden. *

* API name: {@code expand_wildcards} */ @@ -316,8 +449,8 @@ public final Boolean ignoreUnavailable() { } /** - * Required - Comma-separated list of data streams, indices, and aliases to - * search. Supports wildcards (*). To search all data streams or + * Required - A comma-separated list of data streams, indices, and aliases to + * search. It supports wildcards (*). To search all data streams or * indices, omit this parameter or use * or _all. *

* API name: {@code index} @@ -328,7 +461,8 @@ public final List index() { /** * If true, format-based query failures (such as providing text to - * a numeric field) in the query string will be ignored. + * a numeric field) in the query string will be ignored. This parameter can be + * used only when the q query string parameter is specified. *

* API name: {@code lenient} */ @@ -348,7 +482,7 @@ public final Long maxDocs() { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -359,7 +493,7 @@ public final String preference() { } /** - * Query in the Lucene query string syntax. + * A query in the Lucene query string syntax. *

* API name: {@code q} */ @@ -369,7 +503,7 @@ public final String q() { } /** - * Specifies the documents to delete using the Query DSL. + * The documents to delete specified with Query DSL. *

* API name: {@code query} */ @@ -380,7 +514,10 @@ public final Query query() { /** * If true, Elasticsearch refreshes all shards involved in the - * delete by query after the request completes. + * delete by query after the request completes. This is different than the + * delete API's refresh parameter, which causes just the shard that + * received the delete request to be refreshed. Unlike the delete API, it does + * not support wait_for. *

* API name: {@code refresh} */ @@ -411,7 +548,7 @@ public final Float requestsPerSecond() { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -421,7 +558,7 @@ public final String routing() { } /** - * Period to retain the search context for scrolling. + * The period to retain the search context for scrolling. *

* API name: {@code scroll} */ @@ -431,7 +568,7 @@ public final Time scroll() { } /** - * Size of the scroll request that powers the operation. + * The size of the scroll request that powers the operation. *

* API name: {@code scroll_size} */ @@ -441,7 +578,7 @@ public final Long scrollSize() { } /** - * Explicit timeout for each search request. Defaults to no timeout. + * The explicit timeout for each search request. It defaults to no timeout. *

* API name: {@code search_timeout} */ @@ -451,8 +588,8 @@ public final Time searchTimeout() { } /** - * The type of the search operation. Available options: - * query_then_fetch, dfs_query_then_fetch. + * The type of the search operation. Available options include + * query_then_fetch and dfs_query_then_fetch. *

* API name: {@code search_type} */ @@ -483,7 +620,7 @@ public final Slices slices() { } /** - * A comma-separated list of <field>:<direction> pairs. + * A comma-separated list of <field>:<direction> pairs. *

* API name: {@code sort} */ @@ -492,7 +629,7 @@ public final List sort() { } /** - * Specific tag of the request for logging and statistical + * The specific tag of the request for logging and statistical * purposes. *

* API name: {@code stats} @@ -502,13 +639,14 @@ public final List stats() { } /** - * Maximum number of documents to collect for each shard. If a query reaches + * The maximum number of documents to collect for each shard. If a query reaches * this limit, Elasticsearch terminates the query early. Elasticsearch collects - * documents before sorting. Use with caution. Elasticsearch applies this - * parameter to each shard handling the request. When possible, let - * Elasticsearch perform early termination automatically. Avoid specifying this - * parameter for requests that target data streams with backing indices across - * multiple data tiers. + * documents before sorting. + *

+ * Use with caution. Elasticsearch applies this parameter to each shard handling + * the request. When possible, let Elasticsearch perform early termination + * automatically. Avoid specifying this parameter for requests that target data + * streams with backing indices across multiple data tiers. *

* API name: {@code terminate_after} */ @@ -518,7 +656,7 @@ public final Long terminateAfter() { } /** - * Period each deletion request waits for active shards. + * The period each deletion request waits for active shards. *

* API name: {@code timeout} */ @@ -539,8 +677,10 @@ public final Boolean version() { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total number of - * shards in the index (number_of_replicas+1). + * operation. Set to all or any positive integer up to the total + * number of shards in the index (number_of_replicas+1). The + * timeout value controls how long each write request waits for + * unavailable shards to become available. *

* API name: {@code wait_for_active_shards} */ @@ -550,7 +690,12 @@ public final WaitForActiveShards waitForActiveShards() { } /** - * If true, the request blocks until the operation is complete. + * If true, the request blocks until the operation is complete. If + * false, Elasticsearch performs some preflight checks, launches + * the request, and returns a task you can use to cancel or get the status of + * the task. Elasticsearch creates a record of this task as a document at + * .tasks/task/${taskId}. When you are done with a task, you should + * delete the task document so Elasticsearch can reclaim the space. *

* API name: {@code wait_for_completion} */ @@ -708,7 +853,9 @@ public final Builder allowNoIndices(@Nullable Boolean value) { } /** - * If true, wildcard and prefix queries are analyzed. + * If true, wildcard and prefix queries are analyzed. This + * parameter can be used only when the q query string parameter is + * specified. *

* API name: {@code analyze_wildcard} */ @@ -718,7 +865,8 @@ public final Builder analyzeWildcard(@Nullable Boolean value) { } /** - * Analyzer to use for the query string. + * Analyzer to use for the query string. This parameter can be used only when + * the q query string parameter is specified. *

* API name: {@code analyzer} */ @@ -740,7 +888,8 @@ public final Builder conflicts(@Nullable Conflicts value) { /** * The default operator for query string query: AND or - * OR. + * OR. This parameter can be used only when the q + * query string parameter is specified. *

* API name: {@code default_operator} */ @@ -750,7 +899,9 @@ public final Builder defaultOperator(@Nullable Operator value) { } /** - * Field to use as default where no field prefix is given in the query string. + * The field to use as default where no field prefix is given in the query + * string. This parameter can be used only when the q query string + * parameter is specified. *

* API name: {@code df} */ @@ -760,12 +911,10 @@ public final Builder df(@Nullable String value) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * hidden data streams. It supports comma-separated values, such as + * open,hidden. *

* API name: {@code expand_wildcards} *

@@ -777,12 +926,10 @@ public final Builder expandWildcards(List list) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * hidden data streams. It supports comma-separated values, such as + * open,hidden. *

* API name: {@code expand_wildcards} *

@@ -815,8 +962,8 @@ public final Builder ignoreUnavailable(@Nullable Boolean value) { } /** - * Required - Comma-separated list of data streams, indices, and aliases to - * search. Supports wildcards (*). To search all data streams or + * Required - A comma-separated list of data streams, indices, and aliases to + * search. It supports wildcards (*). To search all data streams or * indices, omit this parameter or use * or _all. *

* API name: {@code index} @@ -829,8 +976,8 @@ public final Builder index(List list) { } /** - * Required - Comma-separated list of data streams, indices, and aliases to - * search. Supports wildcards (*). To search all data streams or + * Required - A comma-separated list of data streams, indices, and aliases to + * search. It supports wildcards (*). To search all data streams or * indices, omit this parameter or use * or _all. *

* API name: {@code index} @@ -844,7 +991,8 @@ public final Builder index(String value, String... values) { /** * If true, format-based query failures (such as providing text to - * a numeric field) in the query string will be ignored. + * a numeric field) in the query string will be ignored. This parameter can be + * used only when the q query string parameter is specified. *

* API name: {@code lenient} */ @@ -864,7 +1012,7 @@ public final Builder maxDocs(@Nullable Long value) { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -875,7 +1023,7 @@ public final Builder preference(@Nullable String value) { } /** - * Query in the Lucene query string syntax. + * A query in the Lucene query string syntax. *

* API name: {@code q} */ @@ -885,7 +1033,7 @@ public final Builder q(@Nullable String value) { } /** - * Specifies the documents to delete using the Query DSL. + * The documents to delete specified with Query DSL. *

* API name: {@code query} */ @@ -895,7 +1043,7 @@ public final Builder query(@Nullable Query value) { } /** - * Specifies the documents to delete using the Query DSL. + * The documents to delete specified with Query DSL. *

* API name: {@code query} */ @@ -905,7 +1053,10 @@ public final Builder query(Function> fn) { /** * If true, Elasticsearch refreshes all shards involved in the - * delete by query after the request completes. + * delete by query after the request completes. This is different than the + * delete API's refresh parameter, which causes just the shard that + * received the delete request to be refreshed. Unlike the delete API, it does + * not support wait_for. *

* API name: {@code refresh} */ @@ -936,7 +1087,7 @@ public final Builder requestsPerSecond(@Nullable Float value) { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -946,7 +1097,7 @@ public final Builder routing(@Nullable String value) { } /** - * Period to retain the search context for scrolling. + * The period to retain the search context for scrolling. *

* API name: {@code scroll} */ @@ -956,7 +1107,7 @@ public final Builder scroll(@Nullable Time value) { } /** - * Period to retain the search context for scrolling. + * The period to retain the search context for scrolling. *

* API name: {@code scroll} */ @@ -965,7 +1116,7 @@ public final Builder scroll(Function> fn) { } /** - * Size of the scroll request that powers the operation. + * The size of the scroll request that powers the operation. *

* API name: {@code scroll_size} */ @@ -975,7 +1126,7 @@ public final Builder scrollSize(@Nullable Long value) { } /** - * Explicit timeout for each search request. Defaults to no timeout. + * The explicit timeout for each search request. It defaults to no timeout. *

* API name: {@code search_timeout} */ @@ -985,7 +1136,7 @@ public final Builder searchTimeout(@Nullable Time value) { } /** - * Explicit timeout for each search request. Defaults to no timeout. + * The explicit timeout for each search request. It defaults to no timeout. *

* API name: {@code search_timeout} */ @@ -994,8 +1145,8 @@ public final Builder searchTimeout(Function> f } /** - * The type of the search operation. Available options: - * query_then_fetch, dfs_query_then_fetch. + * The type of the search operation. Available options include + * query_then_fetch and dfs_query_then_fetch. *

* API name: {@code search_type} */ @@ -1045,7 +1196,7 @@ public final Builder slices(Function> fn) } /** - * A comma-separated list of <field>:<direction> pairs. + * A comma-separated list of <field>:<direction> pairs. *

* API name: {@code sort} *

@@ -1057,7 +1208,7 @@ public final Builder sort(List list) { } /** - * A comma-separated list of <field>:<direction> pairs. + * A comma-separated list of <field>:<direction> pairs. *

* API name: {@code sort} *

@@ -1069,7 +1220,7 @@ public final Builder sort(String value, String... values) { } /** - * Specific tag of the request for logging and statistical + * The specific tag of the request for logging and statistical * purposes. *

* API name: {@code stats} @@ -1082,7 +1233,7 @@ public final Builder stats(List list) { } /** - * Specific tag of the request for logging and statistical + * The specific tag of the request for logging and statistical * purposes. *

* API name: {@code stats} @@ -1095,13 +1246,14 @@ public final Builder stats(String value, String... values) { } /** - * Maximum number of documents to collect for each shard. If a query reaches + * The maximum number of documents to collect for each shard. If a query reaches * this limit, Elasticsearch terminates the query early. Elasticsearch collects - * documents before sorting. Use with caution. Elasticsearch applies this - * parameter to each shard handling the request. When possible, let - * Elasticsearch perform early termination automatically. Avoid specifying this - * parameter for requests that target data streams with backing indices across - * multiple data tiers. + * documents before sorting. + *

+ * Use with caution. Elasticsearch applies this parameter to each shard handling + * the request. When possible, let Elasticsearch perform early termination + * automatically. Avoid specifying this parameter for requests that target data + * streams with backing indices across multiple data tiers. *

* API name: {@code terminate_after} */ @@ -1111,7 +1263,7 @@ public final Builder terminateAfter(@Nullable Long value) { } /** - * Period each deletion request waits for active shards. + * The period each deletion request waits for active shards. *

* API name: {@code timeout} */ @@ -1121,7 +1273,7 @@ public final Builder timeout(@Nullable Time value) { } /** - * Period each deletion request waits for active shards. + * The period each deletion request waits for active shards. *

* API name: {@code timeout} */ @@ -1141,8 +1293,10 @@ public final Builder version(@Nullable Boolean value) { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total number of - * shards in the index (number_of_replicas+1). + * operation. Set to all or any positive integer up to the total + * number of shards in the index (number_of_replicas+1). The + * timeout value controls how long each write request waits for + * unavailable shards to become available. *

* API name: {@code wait_for_active_shards} */ @@ -1153,8 +1307,10 @@ public final Builder waitForActiveShards(@Nullable WaitForActiveShards value) { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total number of - * shards in the index (number_of_replicas+1). + * operation. Set to all or any positive integer up to the total + * number of shards in the index (number_of_replicas+1). The + * timeout value controls how long each write request waits for + * unavailable shards to become available. *

* API name: {@code wait_for_active_shards} */ @@ -1164,7 +1320,12 @@ public final Builder waitForActiveShards( } /** - * If true, the request blocks until the operation is complete. + * If true, the request blocks until the operation is complete. If + * false, Elasticsearch performs some preflight checks, launches + * the request, and returns a task you can use to cancel or get the status of + * the task. Elasticsearch creates a record of this task as a document at + * .tasks/task/${taskId}. When you are done with a task, you should + * delete the task document so Elasticsearch can reclaim the space. *

* API name: {@code wait_for_completion} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteByQueryResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteByQueryResponse.java index b6b010584..6bab73adb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteByQueryResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteByQueryResponse.java @@ -143,6 +143,8 @@ public static DeleteByQueryResponse of(Function * API name: {@code batches} */ @Nullable @@ -151,6 +153,8 @@ public final Long batches() { } /** + * The number of documents that were successfully deleted. + *

* API name: {@code deleted} */ @Nullable @@ -159,6 +163,13 @@ public final Long deleted() { } /** + * An array of failures if there were any unrecoverable errors during the + * process. If this array is not empty, the request ended abnormally because of + * those failures. Delete by query is implemented using batches and any failures + * cause the entire process to end but all failures in the current batch are + * collected into the array. You can use the conflicts option to + * prevent the delete by query from ending on version conflicts. + *

* API name: {@code failures} */ public final List failures() { @@ -166,6 +177,10 @@ public final List failures() { } /** + * This field is always equal to zero for delete by query. It exists only so + * that delete by query, update by query, and reindex APIs return responses with + * the same structure. + *

* API name: {@code noops} */ @Nullable @@ -174,6 +189,8 @@ public final Long noops() { } /** + * The number of requests per second effectively run during the delete by query. + *

* API name: {@code requests_per_second} */ @Nullable @@ -182,6 +199,10 @@ public final Float requestsPerSecond() { } /** + * The number of retries attempted by delete by query. bulk is the + * number of bulk actions retried. search is the number of search + * actions retried. + *

* API name: {@code retries} */ @Nullable @@ -214,6 +235,9 @@ public final Time throttled() { } /** + * The number of milliseconds the request slept to conform to + * requests_per_second. + *

* API name: {@code throttled_millis} */ @Nullable @@ -230,6 +254,11 @@ public final Time throttledUntil() { } /** + * This field should always be equal to zero in a _delete_by_query + * response. It has meaning only when using the task API, where it indicates the + * next time (in milliseconds since epoch) a throttled request will be run again + * in order to conform to requests_per_second. + *

* API name: {@code throttled_until_millis} */ @Nullable @@ -238,6 +267,9 @@ public final Long throttledUntilMillis() { } /** + * If true, some requests run during the delete by query operation + * timed out. + *

* API name: {@code timed_out} */ @Nullable @@ -246,6 +278,8 @@ public final Boolean timedOut() { } /** + * The number of milliseconds from start to end of the whole operation. + *

* API name: {@code took} */ @Nullable @@ -254,6 +288,8 @@ public final Long took() { } /** + * The number of documents that were successfully processed. + *

* API name: {@code total} */ @Nullable @@ -262,6 +298,8 @@ public final Long total() { } /** + * The number of version conflicts that the delete by query hit. + *

* API name: {@code version_conflicts} */ @Nullable @@ -431,6 +469,8 @@ public static class Builder extends WithJsonObjectBuilderBase private Long versionConflicts; /** + * The number of scroll responses pulled back by the delete by query. + *

* API name: {@code batches} */ public final Builder batches(@Nullable Long value) { @@ -439,6 +479,8 @@ public final Builder batches(@Nullable Long value) { } /** + * The number of documents that were successfully deleted. + *

* API name: {@code deleted} */ public final Builder deleted(@Nullable Long value) { @@ -447,6 +489,13 @@ public final Builder deleted(@Nullable Long value) { } /** + * An array of failures if there were any unrecoverable errors during the + * process. If this array is not empty, the request ended abnormally because of + * those failures. Delete by query is implemented using batches and any failures + * cause the entire process to end but all failures in the current batch are + * collected into the array. You can use the conflicts option to + * prevent the delete by query from ending on version conflicts. + *

* API name: {@code failures} *

* Adds all elements of list to failures. @@ -457,6 +506,13 @@ public final Builder failures(List list) { } /** + * An array of failures if there were any unrecoverable errors during the + * process. If this array is not empty, the request ended abnormally because of + * those failures. Delete by query is implemented using batches and any failures + * cause the entire process to end but all failures in the current batch are + * collected into the array. You can use the conflicts option to + * prevent the delete by query from ending on version conflicts. + *

* API name: {@code failures} *

* Adds one or more values to failures. @@ -467,6 +523,13 @@ public final Builder failures(BulkIndexByScrollFailure value, BulkIndexByScrollF } /** + * An array of failures if there were any unrecoverable errors during the + * process. If this array is not empty, the request ended abnormally because of + * those failures. Delete by query is implemented using batches and any failures + * cause the entire process to end but all failures in the current batch are + * collected into the array. You can use the conflicts option to + * prevent the delete by query from ending on version conflicts. + *

* API name: {@code failures} *

* Adds a value to failures using a builder lambda. @@ -477,6 +540,10 @@ public final Builder failures( } /** + * This field is always equal to zero for delete by query. It exists only so + * that delete by query, update by query, and reindex APIs return responses with + * the same structure. + *

* API name: {@code noops} */ public final Builder noops(@Nullable Long value) { @@ -485,6 +552,8 @@ public final Builder noops(@Nullable Long value) { } /** + * The number of requests per second effectively run during the delete by query. + *

* API name: {@code requests_per_second} */ public final Builder requestsPerSecond(@Nullable Float value) { @@ -493,6 +562,10 @@ public final Builder requestsPerSecond(@Nullable Float value) { } /** + * The number of retries attempted by delete by query. bulk is the + * number of bulk actions retried. search is the number of search + * actions retried. + *

* API name: {@code retries} */ public final Builder retries(@Nullable Retries value) { @@ -501,6 +574,10 @@ public final Builder retries(@Nullable Retries value) { } /** + * The number of retries attempted by delete by query. bulk is the + * number of bulk actions retried. search is the number of search + * actions retried. + *

* API name: {@code retries} */ public final Builder retries(Function> fn) { @@ -539,6 +616,9 @@ public final Builder throttled(Function> fn) { } /** + * The number of milliseconds the request slept to conform to + * requests_per_second. + *

* API name: {@code throttled_millis} */ public final Builder throttledMillis(@Nullable Long value) { @@ -562,6 +642,11 @@ public final Builder throttledUntil(Function> } /** + * This field should always be equal to zero in a _delete_by_query + * response. It has meaning only when using the task API, where it indicates the + * next time (in milliseconds since epoch) a throttled request will be run again + * in order to conform to requests_per_second. + *

* API name: {@code throttled_until_millis} */ public final Builder throttledUntilMillis(@Nullable Long value) { @@ -570,6 +655,9 @@ public final Builder throttledUntilMillis(@Nullable Long value) { } /** + * If true, some requests run during the delete by query operation + * timed out. + *

* API name: {@code timed_out} */ public final Builder timedOut(@Nullable Boolean value) { @@ -578,6 +666,8 @@ public final Builder timedOut(@Nullable Boolean value) { } /** + * The number of milliseconds from start to end of the whole operation. + *

* API name: {@code took} */ public final Builder took(@Nullable Long value) { @@ -586,6 +676,8 @@ public final Builder took(@Nullable Long value) { } /** + * The number of documents that were successfully processed. + *

* API name: {@code total} */ public final Builder total(@Nullable Long value) { @@ -594,6 +686,8 @@ public final Builder total(@Nullable Long value) { } /** + * The number of version conflicts that the delete by query hit. + *

* API name: {@code version_conflicts} */ public final Builder versionConflicts(@Nullable Long value) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteByQueryRethrottleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteByQueryRethrottleRequest.java index 25a44a666..fd56b5193 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteByQueryRethrottleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteByQueryRethrottleRequest.java @@ -89,7 +89,8 @@ public static DeleteByQueryRethrottleRequest of( } /** - * The throttle for this request in sub-requests per second. + * The throttle for this request in sub-requests per second. To disable + * throttling, set it to -1. *

* API name: {@code requests_per_second} */ @@ -122,7 +123,8 @@ public static class Builder extends RequestBase.AbstractBuilder private String taskId; /** - * The throttle for this request in sub-requests per second. + * The throttle for this request in sub-requests per second. To disable + * throttling, set it to -1. *

* API name: {@code requests_per_second} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteRequest.java index 8ced3a521..8057393df 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteRequest.java @@ -60,7 +60,57 @@ // typedef: _global.delete.Request /** - * Delete a document. Removes a JSON document from the specified index. + * Delete a document. + *

+ * Remove a JSON document from the specified index. + *

+ * NOTE: You cannot send deletion requests directly to a data stream. To delete + * a document in a data stream, you must target the backing index containing the + * document. + *

+ * Optimistic concurrency control + *

+ * Delete operations can be made conditional and only be performed if the last + * modification to the document was assigned the sequence number and primary + * term specified by the if_seq_no and if_primary_term + * parameters. If a mismatch is detected, the operation will result in a + * VersionConflictException and a status code of 409. + *

+ * Versioning + *

+ * Each document indexed is versioned. When deleting a document, the version can + * be specified to make sure the relevant document you are trying to delete is + * actually being deleted and it has not changed in the meantime. Every write + * operation run on a document, deletes included, causes its version to be + * incremented. The version number of a deleted document remains available for a + * short time after deletion to allow for control of concurrent operations. The + * length of time for which a deleted document's version remains available is + * determined by the index.gc_deletes index setting. + *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to delete a document. + *

+ * If the _routing mapping is set to required and no + * routing value is specified, the delete API throws a + * RoutingMissingException and rejects the request. + *

+ * For example: + * + *

+ * DELETE /my-index-000001/_doc/1?routing=shard-1
+ * 
+ * 
+ *

+ * This request deletes the document with ID 1, but it is routed based on the + * user. The document is not deleted if the correct routing is not specified. + *

+ * Distributed + *

+ * The delete operation gets hashed into a specific shard ID. It then gets + * redirected into the primary shard within that ID group and replicated (if + * needed) to shard replicas within that ID group. * * @see API * specification @@ -117,7 +167,7 @@ public static DeleteRequest of(Function> f } /** - * Required - Unique identifier for the document. + * Required - A unique identifier for the document. *

* API name: {@code id} */ @@ -146,7 +196,7 @@ public final Long ifSeqNo() { } /** - * Required - Name of the target index. + * Required - The name of the target index. *

* API name: {@code index} */ @@ -156,10 +206,9 @@ public final String index() { /** * If true, Elasticsearch refreshes the affected shards to make - * this operation visible to search, if wait_for then wait for a - * refresh to make this operation visible to search, if false do - * nothing with refreshes. Valid values: true, false, - * wait_for. + * this operation visible to search. If wait_for, it waits for a + * refresh to make this operation visible to search. If false, it + * does nothing with refreshes. *

* API name: {@code refresh} */ @@ -169,7 +218,7 @@ public final Refresh refresh() { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -179,7 +228,14 @@ public final String routing() { } /** - * Period to wait for active shards. + * The period to wait for active shards. + *

+ * This parameter is useful for situations where the primary shard assigned to + * perform the delete operation might not be available when the delete operation + * runs. Some reasons for this might be that the primary shard is currently + * recovering from a store or undergoing relocation. By default, the delete + * operation will wait on the primary shard to become available for up to 1 + * minute before failing and responding with an error. *

* API name: {@code timeout} */ @@ -189,8 +245,8 @@ public final Time timeout() { } /** - * Explicit version number for concurrency control. The specified version must - * match the current version of the document for the request to succeed. + * An explicit version number for concurrency control. It must match the current + * version of the document for the request to succeed. *

* API name: {@code version} */ @@ -200,7 +256,7 @@ public final Long version() { } /** - * Specific version type: external, external_gte. + * The version type. *

* API name: {@code version_type} */ @@ -210,9 +266,11 @@ public final VersionType versionType() { } /** - * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total - * number of shards in the index (number_of_replicas+1). + * The minimum number of shard copies that must be active before proceeding with + * the operation. You can set it to all or any positive integer up + * to the total number of shards in the index + * (number_of_replicas+1). The default value of 1 + * means it waits for each primary shard to be active. *

* API name: {@code wait_for_active_shards} */ @@ -257,7 +315,7 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private WaitForActiveShards waitForActiveShards; /** - * Required - Unique identifier for the document. + * Required - A unique identifier for the document. *

* API name: {@code id} */ @@ -287,7 +345,7 @@ public final Builder ifSeqNo(@Nullable Long value) { } /** - * Required - Name of the target index. + * Required - The name of the target index. *

* API name: {@code index} */ @@ -298,10 +356,9 @@ public final Builder index(String value) { /** * If true, Elasticsearch refreshes the affected shards to make - * this operation visible to search, if wait_for then wait for a - * refresh to make this operation visible to search, if false do - * nothing with refreshes. Valid values: true, false, - * wait_for. + * this operation visible to search. If wait_for, it waits for a + * refresh to make this operation visible to search. If false, it + * does nothing with refreshes. *

* API name: {@code refresh} */ @@ -311,7 +368,7 @@ public final Builder refresh(@Nullable Refresh value) { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -321,7 +378,14 @@ public final Builder routing(@Nullable String value) { } /** - * Period to wait for active shards. + * The period to wait for active shards. + *

+ * This parameter is useful for situations where the primary shard assigned to + * perform the delete operation might not be available when the delete operation + * runs. Some reasons for this might be that the primary shard is currently + * recovering from a store or undergoing relocation. By default, the delete + * operation will wait on the primary shard to become available for up to 1 + * minute before failing and responding with an error. *

* API name: {@code timeout} */ @@ -331,7 +395,14 @@ public final Builder timeout(@Nullable Time value) { } /** - * Period to wait for active shards. + * The period to wait for active shards. + *

+ * This parameter is useful for situations where the primary shard assigned to + * perform the delete operation might not be available when the delete operation + * runs. Some reasons for this might be that the primary shard is currently + * recovering from a store or undergoing relocation. By default, the delete + * operation will wait on the primary shard to become available for up to 1 + * minute before failing and responding with an error. *

* API name: {@code timeout} */ @@ -340,8 +411,8 @@ public final Builder timeout(Function> fn) { } /** - * Explicit version number for concurrency control. The specified version must - * match the current version of the document for the request to succeed. + * An explicit version number for concurrency control. It must match the current + * version of the document for the request to succeed. *

* API name: {@code version} */ @@ -351,7 +422,7 @@ public final Builder version(@Nullable Long value) { } /** - * Specific version type: external, external_gte. + * The version type. *

* API name: {@code version_type} */ @@ -361,9 +432,11 @@ public final Builder versionType(@Nullable VersionType value) { } /** - * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total - * number of shards in the index (number_of_replicas+1). + * The minimum number of shard copies that must be active before proceeding with + * the operation. You can set it to all or any positive integer up + * to the total number of shards in the index + * (number_of_replicas+1). The default value of 1 + * means it waits for each primary shard to be active. *

* API name: {@code wait_for_active_shards} */ @@ -373,9 +446,11 @@ public final Builder waitForActiveShards(@Nullable WaitForActiveShards value) { } /** - * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total - * number of shards in the index (number_of_replicas+1). + * The minimum number of shard copies that must be active before proceeding with + * the operation. You can set it to all or any positive integer up + * to the total number of shards in the index + * (number_of_replicas+1). The default value of 1 + * means it waits for each primary shard to be active. *

* API name: {@code wait_for_active_shards} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteScriptRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteScriptRequest.java index 3f3f8afaf..f13c910c7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteScriptRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/DeleteScriptRequest.java @@ -87,7 +87,7 @@ public static DeleteScriptRequest of(Function * API name: {@code id} */ @@ -96,8 +96,10 @@ public final String id() { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * It can also be set to -1 to indicate that the request should + * never timeout. *

* API name: {@code master_timeout} */ @@ -107,8 +109,9 @@ public final Time masterTimeout() { } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. It can also be set + * to -1 to indicate that the request should never timeout. *

* API name: {@code timeout} */ @@ -135,7 +138,7 @@ public static class Builder extends RequestBase.AbstractBuilder private Time timeout; /** - * Required - Identifier for the stored script or search template. + * Required - The identifier for the stored script or search template. *

* API name: {@code id} */ @@ -145,8 +148,10 @@ public final Builder id(String value) { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * It can also be set to -1 to indicate that the request should + * never timeout. *

* API name: {@code master_timeout} */ @@ -156,8 +161,10 @@ public final Builder masterTimeout(@Nullable Time value) { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * It can also be set to -1 to indicate that the request should + * never timeout. *

* API name: {@code master_timeout} */ @@ -166,8 +173,9 @@ public final Builder masterTimeout(Function> f } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. It can also be set + * to -1 to indicate that the request should never timeout. *

* API name: {@code timeout} */ @@ -177,8 +185,9 @@ public final Builder timeout(@Nullable Time value) { } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. It can also be set + * to -1 to indicate that the request should never timeout. *

* API name: {@code timeout} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ExistsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ExistsRequest.java index 3451c2bce..c15d86b83 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ExistsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ExistsRequest.java @@ -63,7 +63,29 @@ // typedef: _global.exists.Request /** - * Check a document. Checks if a specified document exists. + * Check a document. + *

+ * Verify that a document exists. For example, check to see if a document with + * the _id 0 exists: + * + *

+ * HEAD my-index-000001/_doc/0
+ * 
+ * 
+ *

+ * If the document exists, the API returns a status code of + * 200 - OK. If the document doesn’t exist, the API returns + * 404 - Not Found. + *

+ * Versioning support + *

+ * You can use the version parameter to check the document only if + * its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @see API * specification @@ -125,8 +147,8 @@ public static ExistsRequest of(Function> f } /** - * true or false to return the _source - * field or not, or a list of fields to return. + * Indicates whether to return the _source field (true + * or false) or lists the fields to return. *

* API name: {@code _source} */ @@ -136,7 +158,10 @@ public final SourceConfigParam source() { } /** - * A comma-separated list of source fields to exclude in the response. + * A comma-separated list of source fields to exclude from the response. You can + * also use this parameter to exclude fields from the subset specified in + * _source_includes query parameter. If the _source + * parameter is false, this parameter is ignored. *

* API name: {@code _source_excludes} */ @@ -145,7 +170,11 @@ public final List sourceExcludes() { } /** - * A comma-separated list of source fields to include in the response. + * A comma-separated list of source fields to include in the response. If this + * parameter is specified, only these source fields are returned. You can + * exclude fields from this subset using the _source_excludes query + * parameter. If the _source parameter is false, this + * parameter is ignored. *

* API name: {@code _source_includes} */ @@ -154,7 +183,7 @@ public final List sourceIncludes() { } /** - * Required - Identifier of the document. + * Required - A unique document identifier. *

* API name: {@code id} */ @@ -163,8 +192,8 @@ public final String id() { } /** - * Required - Comma-separated list of data streams, indices, and aliases. - * Supports wildcards (*). + * Required - A comma-separated list of data streams, indices, and aliases. It + * supports wildcards (*). *

* API name: {@code index} */ @@ -173,8 +202,15 @@ public final String index() { } /** - * Specifies the node or shard the operation should be performed on. Random by - * default. + * The node or shard the operation should be performed on. By default, the + * operation is randomized between the shard replicas. + *

+ * If it is set to _local, the operation will prefer to be run on a + * local allocated shard when possible. If it is set to a custom value, the + * value is used to guarantee that the same shards will be used for the same + * custom value. This can help with "jumping values" when hitting + * different shards in different refresh states. A sample value can be something + * like the web session ID or the user name. *

* API name: {@code preference} */ @@ -194,8 +230,10 @@ public final Boolean realtime() { } /** - * If true, Elasticsearch refreshes all shards involved in the - * delete by query after the request completes. + * If true, the request refreshes the relevant shards before + * retrieving the document. Setting it to true should be done after + * careful thought and verification that this does not cause a heavy load on the + * system (and slow down indexing). *

* API name: {@code refresh} */ @@ -205,7 +243,7 @@ public final Boolean refresh() { } /** - * Target the specified primary shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -215,9 +253,10 @@ public final String routing() { } /** - * List of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, - * the _source parameter defaults to false. + * A comma-separated list of stored fields to return as part of a hit. If no + * fields are specified, no stored fields are included in the response. If this + * field is specified, the _source parameter defaults to + * false. *

* API name: {@code stored_fields} */ @@ -237,7 +276,7 @@ public final Long version() { } /** - * Specific version type: external, external_gte. + * The version type. *

* API name: {@code version_type} */ @@ -288,8 +327,8 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private VersionType versionType; /** - * true or false to return the _source - * field or not, or a list of fields to return. + * Indicates whether to return the _source field (true + * or false) or lists the fields to return. *

* API name: {@code _source} */ @@ -299,8 +338,8 @@ public final Builder source(@Nullable SourceConfigParam value) { } /** - * true or false to return the _source - * field or not, or a list of fields to return. + * Indicates whether to return the _source field (true + * or false) or lists the fields to return. *

* API name: {@code _source} */ @@ -309,7 +348,10 @@ public final Builder source(Function_source_includes query parameter. If the _source + * parameter is false, this parameter is ignored. *

* API name: {@code _source_excludes} *

@@ -321,7 +363,10 @@ public final Builder sourceExcludes(List list) { } /** - * A comma-separated list of source fields to exclude in the response. + * A comma-separated list of source fields to exclude from the response. You can + * also use this parameter to exclude fields from the subset specified in + * the _source_includes query parameter. If the _source + * parameter is false, this parameter is ignored. *

* API name: {@code _source_excludes} *

@@ -333,7 +378,11 @@ public final Builder sourceExcludes(String value, String... values) { } /** - * A comma-separated list of source fields to include in the response. + * A comma-separated list of source fields to include in the response. If this + * parameter is specified, only these source fields are returned. You can + * exclude fields from this subset using the _source_excludes query + * parameter. If the _source parameter is false, this + * parameter is ignored. *

* API name: {@code _source_includes} *

@@ -345,7 +394,11 @@ public final Builder sourceIncludes(List list) { } /** - * A comma-separated list of source fields to include in the response. + * A comma-separated list of source fields to include in the response. If this + * parameter is specified, only these source fields are returned. You can + * exclude fields from this subset using the _source_excludes query + * parameter. If the _source parameter is false, this + * parameter is ignored. *

* API name: {@code _source_includes} *

@@ -357,7 +410,7 @@ public final Builder sourceIncludes(String value, String... values) { } /** - * Required - Identifier of the document. + * Required - A unique document identifier. *

* API name: {@code id} */ @@ -367,8 +420,8 @@ public final Builder id(String value) { } /** - * Required - Comma-separated list of data streams, indices, and aliases. - * Supports wildcards (*). + * Required - A comma-separated list of data streams, indices, and aliases. It + * supports wildcards (*). *

* API name: {@code index} */ @@ -378,8 +431,15 @@ public final Builder index(String value) { } /** - * Specifies the node or shard the operation should be performed on. Random by - * default. + * The node or shard the operation should be performed on. By default, the + * operation is randomized between the shard replicas. + *

+ * If it is set to _local, the operation will prefer to be run on a + * local allocated shard when possible. If it is set to a custom value, the + * value is used to guarantee that the same shards will be used for the same + * custom value. This can help with "jumping values" when hitting + * different shards in different refresh states. A sample value can be something + * like the web session ID or the user name. *

* API name: {@code preference} */ @@ -399,8 +459,10 @@ public final Builder realtime(@Nullable Boolean value) { } /** - * If true, Elasticsearch refreshes all shards involved in the - * delete by query after the request completes. + * If true, the request refreshes the relevant shards before + * retrieving the document. Setting it to true should be done after + * careful thought and verification that this does not cause a heavy load on the + * system (and slow down indexing). *

* API name: {@code refresh} */ @@ -410,7 +472,7 @@ public final Builder refresh(@Nullable Boolean value) { } /** - * Target the specified primary shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -420,9 +482,10 @@ public final Builder routing(@Nullable String value) { } /** - * List of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, - * the _source parameter defaults to false. + * A comma-separated list of stored fields to return as part of a hit. If no + * fields are specified, no stored fields are included in the response. If this + * field is specified, the _source parameter defaults to + * false. *

* API name: {@code stored_fields} *

@@ -434,9 +497,10 @@ public final Builder storedFields(List list) { } /** - * List of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, - * the _source parameter defaults to false. + * A comma-separated list of stored fields to return as part of a hit. If no + * fields are specified, no stored fields are included in the response. If this + * field is specified, the _source parameter defaults to + * false. *

* API name: {@code stored_fields} *

@@ -459,7 +523,7 @@ public final Builder version(@Nullable Long value) { } /** - * Specific version type: external, external_gte. + * The version type. *

* API name: {@code version_type} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ExistsSourceRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ExistsSourceRequest.java index d3621903e..1b74d7908 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ExistsSourceRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ExistsSourceRequest.java @@ -63,8 +63,16 @@ // typedef: _global.exists_source.Request /** - * Check for a document source. Checks if a document's _source is - * stored. + * Check for a document source. + *

+ * Check whether a document source exists in an index. For example: + * + *

+ * HEAD my-index-000001/_source/1
+ * 
+ * 
+ *

+ * A document's source is not available if it is disabled in the mapping. * * @see API * specification @@ -123,8 +131,8 @@ public static ExistsSourceRequest of(Functiontrue or false to return the _source - * field or not, or a list of fields to return. + * Indicates whether to return the _source field (true + * or false) or lists the fields to return. *

* API name: {@code _source} */ @@ -152,7 +160,7 @@ public final List sourceIncludes() { } /** - * Required - Identifier of the document. + * Required - A unique identifier for the document. *

* API name: {@code id} */ @@ -161,8 +169,8 @@ public final String id() { } /** - * Required - Comma-separated list of data streams, indices, and aliases. - * Supports wildcards (*). + * Required - A comma-separated list of data streams, indices, and aliases. It + * supports wildcards (*). *

* API name: {@code index} */ @@ -171,8 +179,8 @@ public final String index() { } /** - * Specifies the node or shard the operation should be performed on. Random by - * default. + * The node or shard the operation should be performed on. By default, the + * operation is randomized between the shard replicas. *

* API name: {@code preference} */ @@ -182,7 +190,7 @@ public final String preference() { } /** - * If true, the request is real-time as opposed to near-real-time. + * If true, the request is real-time as opposed to near-real-time. *

* API name: {@code realtime} */ @@ -192,8 +200,10 @@ public final Boolean realtime() { } /** - * If true, Elasticsearch refreshes all shards involved in the - * delete by query after the request completes. + * If true, the request refreshes the relevant shards before + * retrieving the document. Setting it to true should be done after + * careful thought and verification that this does not cause a heavy load on the + * system (and slow down indexing). *

* API name: {@code refresh} */ @@ -203,7 +213,7 @@ public final Boolean refresh() { } /** - * Target the specified primary shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -213,8 +223,8 @@ public final String routing() { } /** - * Explicit version number for concurrency control. The specified version must - * match the current version of the document for the request to succeed. + * The version number for concurrency control. It must match the current version + * of the document for the request to succeed. *

* API name: {@code version} */ @@ -224,7 +234,7 @@ public final Long version() { } /** - * Specific version type: external, external_gte. + * The version type. *

* API name: {@code version_type} */ @@ -274,8 +284,8 @@ public static class Builder extends RequestBase.AbstractBuilder private VersionType versionType; /** - * true or false to return the _source - * field or not, or a list of fields to return. + * Indicates whether to return the _source field (true + * or false) or lists the fields to return. *

* API name: {@code _source} */ @@ -285,8 +295,8 @@ public final Builder source(@Nullable SourceConfigParam value) { } /** - * true or false to return the _source - * field or not, or a list of fields to return. + * Indicates whether to return the _source field (true + * or false) or lists the fields to return. *

* API name: {@code _source} */ @@ -343,7 +353,7 @@ public final Builder sourceIncludes(String value, String... values) { } /** - * Required - Identifier of the document. + * Required - A unique identifier for the document. *

* API name: {@code id} */ @@ -353,8 +363,8 @@ public final Builder id(String value) { } /** - * Required - Comma-separated list of data streams, indices, and aliases. - * Supports wildcards (*). + * Required - A comma-separated list of data streams, indices, and aliases. It + * supports wildcards (*). *

* API name: {@code index} */ @@ -364,8 +374,8 @@ public final Builder index(String value) { } /** - * Specifies the node or shard the operation should be performed on. Random by - * default. + * The node or shard the operation should be performed on. By default, the + * operation is randomized between the shard replicas. *

* API name: {@code preference} */ @@ -375,7 +385,7 @@ public final Builder preference(@Nullable String value) { } /** - * If true, the request is real-time as opposed to near-real-time. + * If true, the request is real-time as opposed to near-real-time. *

* API name: {@code realtime} */ @@ -385,8 +395,10 @@ public final Builder realtime(@Nullable Boolean value) { } /** - * If true, Elasticsearch refreshes all shards involved in the - * delete by query after the request completes. + * If true, the request refreshes the relevant shards before + * retrieving the document. Setting it to true should be done after + * careful thought and verification that this does not cause a heavy load on the + * system (and slow down indexing). *

* API name: {@code refresh} */ @@ -396,7 +408,7 @@ public final Builder refresh(@Nullable Boolean value) { } /** - * Target the specified primary shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -406,8 +418,8 @@ public final Builder routing(@Nullable String value) { } /** - * Explicit version number for concurrency control. The specified version must - * match the current version of the document for the request to succeed. + * The version number for concurrency control. It must match the current version + * of the document for the request to succeed. *

* API name: {@code version} */ @@ -417,7 +429,7 @@ public final Builder version(@Nullable Long value) { } /** - * Specific version type: external, external_gte. + * The version type. *

* API name: {@code version_type} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ExplainRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ExplainRequest.java index 3c05771e0..fcca0bdec 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ExplainRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ExplainRequest.java @@ -63,8 +63,9 @@ // typedef: _global.explain.Request /** - * Explain a document match result. Returns information about why a specific - * document matches, or doesn’t match, a query. + * Explain a document match result. Get information about why a specific + * document matches, or doesn't match, a query. It computes a score explanation + * for a query and a specific document. * * @see API * specification @@ -138,8 +139,8 @@ public static ExplainRequest of(Function> } /** - * True or false to return the _source field or not, or a list of - * fields to return. + * True or false to return the _source + * field or not or a list of fields to return. *

* API name: {@code _source} */ @@ -149,7 +150,10 @@ public final SourceConfigParam source() { } /** - * A comma-separated list of source fields to exclude from the response. + * A comma-separated list of source fields to exclude from the response. You can + * also use this parameter to exclude fields from the subset specified in + * the _source_includes query parameter. If the _source + * parameter is false, this parameter is ignored. *

* API name: {@code _source_excludes} */ @@ -158,7 +162,11 @@ public final List sourceExcludes() { } /** - * A comma-separated list of source fields to include in the response. + * A comma-separated list of source fields to include in the response. If this + * parameter is specified, only these source fields are returned. You can + * exclude fields from this subset using the _source_excludes query + * parameter. If the _source parameter is false, this + * parameter is ignored. *

* API name: {@code _source_includes} */ @@ -167,7 +175,9 @@ public final List sourceIncludes() { } /** - * If true, wildcard and prefix queries are analyzed. + * If true, wildcard and prefix queries are analyzed. This + * parameter can be used only when the q query string parameter is + * specified. *

* API name: {@code analyze_wildcard} */ @@ -177,8 +187,8 @@ public final Boolean analyzeWildcard() { } /** - * Analyzer to use for the query string. This parameter can only be used when - * the q query string parameter is specified. + * The analyzer to use for the query string. This parameter can be used only + * when the q query string parameter is specified. *

* API name: {@code analyzer} */ @@ -189,7 +199,8 @@ public final String analyzer() { /** * The default operator for query string query: AND or - * OR. + * OR. This parameter can be used only when the q + * query string parameter is specified. *

* API name: {@code default_operator} */ @@ -199,7 +210,9 @@ public final Operator defaultOperator() { } /** - * Field to use as default where no field prefix is given in the query string. + * The field to use as default where no field prefix is given in the query + * string. This parameter can be used only when the q query string + * parameter is specified. *

* API name: {@code df} */ @@ -209,7 +222,7 @@ public final String df() { } /** - * Required - Defines the document ID. + * Required - The document identifier. *

* API name: {@code id} */ @@ -218,8 +231,8 @@ public final String id() { } /** - * Required - Index names used to limit the request. Only a single index name - * can be provided to this parameter. + * Required - Index names that are used to limit the request. Only a single + * index name can be provided to this parameter. *

* API name: {@code index} */ @@ -229,7 +242,8 @@ public final String index() { /** * If true, format-based query failures (such as providing text to - * a numeric field) in the query string will be ignored. + * a numeric field) in the query string will be ignored. This parameter can be + * used only when the q query string parameter is specified. *

* API name: {@code lenient} */ @@ -239,7 +253,7 @@ public final Boolean lenient() { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -250,7 +264,7 @@ public final String preference() { } /** - * Query in the Lucene query string syntax. + * The query in the Lucene query string syntax. *

* API name: {@code q} */ @@ -270,7 +284,7 @@ public final Query query() { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -358,8 +372,8 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private List storedFields; /** - * True or false to return the _source field or not, or a list of - * fields to return. + * True or false to return the _source + * field or not or a list of fields to return. *

* API name: {@code _source} */ @@ -369,8 +383,8 @@ public final Builder source(@Nullable SourceConfigParam value) { } /** - * True or false to return the _source field or not, or a list of - * fields to return. + * True or false to return the _source + * field or not, or a list of fields to return. *

* API name: {@code _source} */ @@ -379,7 +393,10 @@ public final Builder source(Function_source_includes query parameter. If the _source + * parameter is false, this parameter is ignored. *

* API name: {@code _source_excludes} *

@@ -391,7 +408,10 @@ public final Builder sourceExcludes(List list) { } /** - * A comma-separated list of source fields to exclude from the response. + * A comma-separated list of source fields to exclude from the response. You can + * also use this parameter to exclude fields from the subset specified in + * _source_includes query parameter. If the _source + * parameter is false, this parameter is ignored. *

* API name: {@code _source_excludes} *

@@ -403,7 +423,11 @@ public final Builder sourceExcludes(String value, String... values) { } /** - * A comma-separated list of source fields to include in the response. + * A comma-separated list of source fields to include in the response. If this + * parameter is specified, only these source fields are returned. You can + * exclude fields from this subset using the _source_excludes query + * parameter. If the _source parameter is false, this + * parameter is ignored. *

* API name: {@code _source_includes} *

@@ -415,7 +439,11 @@ public final Builder sourceIncludes(List list) { } /** - * A comma-separated list of source fields to include in the response. + * A comma-separated list of source fields to include in the response. If this + * parameter is specified, only these source fields are returned. You can + * exclude fields from this subset using the _source_excludes query + * parameter. If the _source parameter is false, this + * parameter is ignored. *

* API name: {@code _source_includes} *

@@ -427,7 +455,9 @@ public final Builder sourceIncludes(String value, String... values) { } /** - * If true, wildcard and prefix queries are analyzed. + * If true, wildcard and prefix queries are analyzed. This + * parameter can be used only when the q query string parameter is + * specified. *

* API name: {@code analyze_wildcard} */ @@ -437,8 +467,8 @@ public final Builder analyzeWildcard(@Nullable Boolean value) { } /** - * Analyzer to use for the query string. This parameter can only be used when - * the q query string parameter is specified. + * The analyzer to use for the query string. This parameter can be used only + * when the q query string parameter is specified. *

* API name: {@code analyzer} */ @@ -449,7 +479,8 @@ public final Builder analyzer(@Nullable String value) { /** * The default operator for query string query: AND or - * OR. + * OR. This parameter can be used only when the q + * query string parameter is specified. *

* API name: {@code default_operator} */ @@ -459,7 +490,9 @@ public final Builder defaultOperator(@Nullable Operator value) { } /** - * Field to use as default where no field prefix is given in the query string. + * The field to use as default where no field prefix is given in the query + * string. This parameter can be used only when the q query string + * parameter is specified. *

* API name: {@code df} */ @@ -469,7 +502,7 @@ public final Builder df(@Nullable String value) { } /** - * Required - Defines the document ID. + * Required - The document identifier. *

* API name: {@code id} */ @@ -479,8 +512,8 @@ public final Builder id(String value) { } /** - * Required - Index names used to limit the request. Only a single index name - * can be provided to this parameter. + * Required - Index names that are used to limit the request. Only a single + * index name can be provided to this parameter. *

* API name: {@code index} */ @@ -491,7 +524,8 @@ public final Builder index(String value) { /** * If true, format-based query failures (such as providing text to - * a numeric field) in the query string will be ignored. + * a numeric field) in the query string will be ignored. This parameter can be + * used only when the q query string parameter is specified. *

* API name: {@code lenient} */ @@ -501,7 +535,7 @@ public final Builder lenient(@Nullable Boolean value) { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -512,7 +546,7 @@ public final Builder preference(@Nullable String value) { } /** - * Query in the Lucene query string syntax. + * The query in the Lucene query string syntax. *

* API name: {@code q} */ @@ -541,7 +575,7 @@ public final Builder query(Function> fn) { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/FieldCapsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/FieldCapsRequest.java index 49835f35f..bcd65fa78 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/FieldCapsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/FieldCapsRequest.java @@ -142,7 +142,7 @@ public final Boolean allowNoIndices() { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as * open,hidden. @@ -154,7 +154,7 @@ public final List expandWildcards() { } /** - * List of fields to retrieve capabilities for. Wildcard (*) + * A list of fields to retrieve capabilities for. Wildcard (*) * expressions are supported. *

* API name: {@code fields} @@ -164,8 +164,7 @@ public final List fields() { } /** - * An optional set of filters: can include - * +metadata,-metadata,-nested,-multifield,-parent + * A comma-separated list of filters to apply to the response. *

* API name: {@code filters} */ @@ -206,9 +205,9 @@ public final Boolean includeUnmapped() { } /** - * Comma-separated list of data streams, indices, and aliases used to limit the - * request. Supports wildcards (*). To target all data streams and indices, omit - * this parameter or use * or _all. + * A comma-separated list of data streams, indices, and aliases used to limit + * the request. Supports wildcards (*). To target all data streams and indices, + * omit this parameter or use * or _all. *

* API name: {@code index} */ @@ -217,9 +216,17 @@ public final List index() { } /** - * Allows to filter indices if the provided query rewrites to match_none on + * Filter indices if the provided query rewrites to match_none on * every shard. *

+ * IMPORTANT: The filtering is done on a best-effort basis, it uses index + * statistics and mappings to rewrite queries to match_none instead + * of fully running the request. For instance a range query over a date field + * can rewrite to match_none if all documents within a shard + * (including deleted documents) are outside of the provided range. However, not + * all queries can rewrite to match_none so this API may return an + * index even if the provided filter matches no document. + *

* API name: {@code index_filter} */ @Nullable @@ -228,7 +235,7 @@ public final Query indexFilter() { } /** - * Defines ad-hoc runtime fields in the request similar to the way it is done in + * Define ad-hoc runtime fields in the request similar to the way it is done in * search requests. These fields exist only as part of the query and take * precedence over fields defined with the same name in the index mappings. *

@@ -239,7 +246,9 @@ public final Map runtimeMappings() { } /** - * Only return results for fields that have one of the types in the list + * A comma-separated list of field types to include. Any fields that do not + * match one of these types will be excluded from the results. It defaults to + * empty, meaning that all field types are returned. *

* API name: {@code types} */ @@ -344,7 +353,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as * open,hidden. @@ -359,7 +368,7 @@ public final Builder expandWildcards(List list) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as * open,hidden. @@ -374,7 +383,7 @@ public final Builder expandWildcards(ExpandWildcard value, ExpandWildcard... val } /** - * List of fields to retrieve capabilities for. Wildcard (*) + * A list of fields to retrieve capabilities for. Wildcard (*) * expressions are supported. *

* API name: {@code fields} @@ -387,7 +396,7 @@ public final Builder fields(List list) { } /** - * List of fields to retrieve capabilities for. Wildcard (*) + * A list of fields to retrieve capabilities for. Wildcard (*) * expressions are supported. *

* API name: {@code fields} @@ -400,8 +409,7 @@ public final Builder fields(String value, String... values) { } /** - * An optional set of filters: can include - * +metadata,-metadata,-nested,-multifield,-parent + * A comma-separated list of filters to apply to the response. *

* API name: {@code filters} */ @@ -442,9 +450,9 @@ public final Builder includeUnmapped(@Nullable Boolean value) { } /** - * Comma-separated list of data streams, indices, and aliases used to limit the - * request. Supports wildcards (*). To target all data streams and indices, omit - * this parameter or use * or _all. + * A comma-separated list of data streams, indices, and aliases used to limit + * the request. Supports wildcards (*). To target all data streams and indices, + * omit this parameter or use * or _all. *

* API name: {@code index} *

@@ -456,9 +464,9 @@ public final Builder index(List list) { } /** - * Comma-separated list of data streams, indices, and aliases used to limit the - * request. Supports wildcards (*). To target all data streams and indices, omit - * this parameter or use * or _all. + * A comma-separated list of data streams, indices, and aliases used to limit + * the request. Supports wildcards (*). To target all data streams and indices, + * omit this parameter or use * or _all. *

* API name: {@code index} *

@@ -470,9 +478,17 @@ public final Builder index(String value, String... values) { } /** - * Allows to filter indices if the provided query rewrites to match_none on + * Filter indices if the provided query rewrites to match_none on * every shard. *

+ * IMPORTANT: The filtering is done on a best-effort basis, it uses index + * statistics and mappings to rewrite queries to match_none instead + * of fully running the request. For instance a range query over a date field + * can rewrite to match_none if all documents within a shard + * (including deleted documents) are outside of the provided range. However, not + * all queries can rewrite to match_none so this API may return an + * index even if the provided filter matches no document. + *

* API name: {@code index_filter} */ public final Builder indexFilter(@Nullable Query value) { @@ -481,9 +497,17 @@ public final Builder indexFilter(@Nullable Query value) { } /** - * Allows to filter indices if the provided query rewrites to match_none on + * Filter indices if the provided query rewrites to match_none on * every shard. *

+ * IMPORTANT: The filtering is done on a best-effort basis, it uses index + * statistics and mappings to rewrite queries to match_none instead + * of fully running the request. For instance a range query over a date field + * can rewrite to match_none if all documents within a shard + * (including deleted documents) are outside of the provided range. However, not + * all queries can rewrite to match_none so this API may return an + * index even if the provided filter matches no document. + *

* API name: {@code index_filter} */ public final Builder indexFilter(Function> fn) { @@ -491,7 +515,7 @@ public final Builder indexFilter(Function> f } /** - * Defines ad-hoc runtime fields in the request similar to the way it is done in + * Define ad-hoc runtime fields in the request similar to the way it is done in * search requests. These fields exist only as part of the query and take * precedence over fields defined with the same name in the index mappings. *

@@ -505,7 +529,7 @@ public final Builder runtimeMappings(Map map) { } /** - * Defines ad-hoc runtime fields in the request similar to the way it is done in + * Define ad-hoc runtime fields in the request similar to the way it is done in * search requests. These fields exist only as part of the query and take * precedence over fields defined with the same name in the index mappings. *

@@ -519,7 +543,7 @@ public final Builder runtimeMappings(String key, RuntimeField value) { } /** - * Defines ad-hoc runtime fields in the request similar to the way it is done in + * Define ad-hoc runtime fields in the request similar to the way it is done in * search requests. These fields exist only as part of the query and take * precedence over fields defined with the same name in the index mappings. *

@@ -533,7 +557,9 @@ public final Builder runtimeMappings(String key, } /** - * Only return results for fields that have one of the types in the list + * A comma-separated list of field types to include. Any fields that do not + * match one of these types will be excluded from the results. It defaults to + * empty, meaning that all field types are returned. *

* API name: {@code types} *

@@ -545,7 +571,9 @@ public final Builder types(List list) { } /** - * Only return results for fields that have one of the types in the list + * A comma-separated list of field types to include. Any fields that do not + * match one of these types will be excluded from the results. It defaults to + * empty, meaning that all field types are returned. *

* API name: {@code types} *

diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/FieldCapsResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/FieldCapsResponse.java index b430d819f..b828118cb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/FieldCapsResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/FieldCapsResponse.java @@ -80,7 +80,10 @@ public static FieldCapsResponse of(Function + * API name: {@code indices} */ public final List indices() { return this.indices; @@ -153,7 +156,10 @@ public static class Builder extends WithJsonObjectBuilderBase implement private Map> fields; /** - * Required - API name: {@code indices} + * Required - The list of indices where this field has the same type family, or + * null if all indices have the same type family for the field. + *

+ * API name: {@code indices} *

* Adds all elements of list to indices. */ @@ -163,7 +169,10 @@ public final Builder indices(List list) { } /** - * Required - API name: {@code indices} + * Required - The list of indices where this field has the same type family, or + * null if all indices have the same type family for the field. + *

+ * API name: {@code indices} *

* Adds one or more values to indices. */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetRequest.java index db5397c67..33e0e492a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetRequest.java @@ -61,8 +61,78 @@ // typedef: _global.get.Request /** - * Get a document by its ID. Retrieves the document with the specified ID from - * an index. + * Get a document by its ID. + *

+ * Get a document and its source or stored fields from an index. + *

+ * By default, this API is realtime and is not affected by the refresh rate of + * the index (when data will become visible for search). In the case where + * stored fields are requested with the stored_fields parameter and + * the document has been updated but is not yet refreshed, the API will have to + * parse and analyze the source to extract the stored fields. To turn off + * realtime behavior, set the realtime parameter to false. + *

+ * Source filtering + *

+ * By default, the API returns the contents of the _source field + * unless you have used the stored_fields parameter or the + * _source field is turned off. You can turn off + * _source retrieval by using the _source parameter: + * + *

+ * GET my-index-000001/_doc/0?_source=false
+ * 
+ * 
+ *

+ * If you only need one or two fields from the _source, use the + * _source_includes or _source_excludes parameters to + * include or filter out particular fields. This can be helpful with large + * documents where partial retrieval can save on network overhead. Both + * parameters take a comma separated list of fields or wildcard expressions. For + * example: + *

+ * GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+ * 
+ * 
+ *

+ * If you only want to specify includes, you can use a shorter notation: + * + *

+ * GET my-index-000001/_doc/0?_source=*.id
+ * 
+ * 
+ *

+ * Routing + *

+ * If routing is used during indexing, the routing value also needs to be + * specified to retrieve a document. For example: + * + *

+ * GET my-index-000001/_doc/2?routing=user1
+ * 
+ * 
+ *

+ * This request gets the document with ID 2, but it is routed based on the user. + * The document is not fetched if the correct routing is not specified. + *

+ * Distributed + *

+ * The GET operation is hashed into a specific shard ID. It is then redirected + * to one of the replicas within that shard ID and returns the result. The + * replicas are the primary shard and its replicas within that shard ID group. + * This means that the more replicas you have, the better your GET scaling will + * be. + *

+ * Versioning support + *

+ * You can use the version parameter to retrieve the document only + * if its current version is equal to the specified one. + *

+ * Internally, Elasticsearch has marked the old document as deleted and added an + * entirely new document. The old version of the document doesn't disappear + * immediately, although you won't be able to access it. Elasticsearch cleans up + * deleted documents in the background as you continue to index more data. * * @see API * specification @@ -128,8 +198,8 @@ public static GetRequest of(Function> fn) { } /** - * True or false to return the _source field or not, or a list of fields to - * return. + * Indicates whether to return the _source field (true + * or false) or lists the fields to return. *

* API name: {@code _source} */ @@ -139,7 +209,10 @@ public final SourceConfigParam source() { } /** - * A comma-separated list of source fields to exclude in the response. + * A comma-separated list of source fields to exclude from the response. You can + * also use this parameter to exclude fields from the subset specified in + * _source_includes query parameter. If the _source + * parameter is false, this parameter is ignored. *

* API name: {@code _source_excludes} */ @@ -148,7 +221,11 @@ public final List sourceExcludes() { } /** - * A comma-separated list of source fields to include in the response. + * A comma-separated list of source fields to include in the response. If this + * parameter is specified, only these source fields are returned. You can + * exclude fields from this subset using the _source_excludes query + * parameter. If the _source parameter is false, this + * parameter is ignored. *

* API name: {@code _source_includes} */ @@ -157,10 +234,10 @@ public final List sourceIncludes() { } /** - * Should this request force synthetic _source? Use this to test if the mapping - * supports synthetic _source and to get a sense of the worst case performance. - * Fetches with this enabled will be slower the enabling synthetic source - * natively in the index. + * Indicates whether the request forces synthetic _source. Use this + * parameter to test if the mapping supports synthetic _source and + * to get a sense of the worst case performance. Fetches with this parameter + * enabled will be slower than enabling synthetic source natively in the index. *

* API name: {@code force_synthetic_source} */ @@ -170,7 +247,7 @@ public final Boolean forceSyntheticSource() { } /** - * Required - Unique identifier of the document. + * Required - A unique document identifier. *

* API name: {@code id} */ @@ -179,7 +256,7 @@ public final String id() { } /** - * Required - Name of the index that contains the document. + * Required - The name of the index that contains the document. *

* API name: {@code index} */ @@ -188,8 +265,15 @@ public final String index() { } /** - * Specifies the node or shard the operation should be performed on. Random by - * default. + * The node or shard the operation should be performed on. By default, the + * operation is randomized between the shard replicas. + *

+ * If it is set to _local, the operation will prefer to be run on a + * local allocated shard when possible. If it is set to a custom value, the + * value is used to guarantee that the same shards will be used for the same + * custom value. This can help with "jumping values" when hitting + * different shards in different refresh states. A sample value can be something + * like the web session ID or the user name. *

* API name: {@code preference} */ @@ -209,8 +293,10 @@ public final Boolean realtime() { } /** - * If true, Elasticsearch refreshes the affected shards to make this operation - * visible to search. If false, do nothing with refreshes. + * If true, the request refreshes the relevant shards before + * retrieving the document. Setting it to true should be done after + * careful thought and verification that this does not cause a heavy load on the + * system (and slow down indexing). *

* API name: {@code refresh} */ @@ -220,7 +306,7 @@ public final Boolean refresh() { } /** - * Target the specified primary shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -230,9 +316,12 @@ public final String routing() { } /** - * List of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, - * the _source parameter defaults to false. + * A comma-separated list of stored fields to return as part of a hit. If no + * fields are specified, no stored fields are included in the response. If this + * field is specified, the _source parameter defaults to + * false. Only leaf fields can be retrieved with the + * stored_field option. Object fields can't be returned; if + * specified, the request fails. *

* API name: {@code stored_fields} */ @@ -241,8 +330,8 @@ public final List storedFields() { } /** - * Explicit version number for concurrency control. The specified version must - * match the current version of the document for the request to succeed. + * The version number for concurrency control. It must match the current version + * of the document for the request to succeed. *

* API name: {@code version} */ @@ -252,7 +341,7 @@ public final Long version() { } /** - * Specific version type: internal, external, external_gte. + * The version type. *

* API name: {@code version_type} */ @@ -306,8 +395,8 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private VersionType versionType; /** - * True or false to return the _source field or not, or a list of fields to - * return. + * Indicates whether to return the _source field (true + * or false) or lists the fields to return. *

* API name: {@code _source} */ @@ -317,8 +406,8 @@ public final Builder source(@Nullable SourceConfigParam value) { } /** - * True or false to return the _source field or not, or a list of fields to - * return. + * Indicates whether to return the _source field (true + * or false) or lists the fields to return. *

* API name: {@code _source} */ @@ -327,7 +416,10 @@ public final Builder source(Function_source_includes query parameter. If the _source + * parameter is false, this parameter is ignored. *

* API name: {@code _source_excludes} *

@@ -339,7 +431,10 @@ public final Builder sourceExcludes(List list) { } /** - * A comma-separated list of source fields to exclude in the response. + * A comma-separated list of source fields to exclude from the response. You can + * also use this parameter to exclude fields from the subset specified in + * _source_includes query parameter. If the _source + * parameter is false, this parameter is ignored. *

* API name: {@code _source_excludes} *

@@ -351,7 +446,11 @@ public final Builder sourceExcludes(String value, String... values) { } /** - * A comma-separated list of source fields to include in the response. + * A comma-separated list of source fields to include in the response. If this + * parameter is specified, only these source fields are returned. You can + * exclude fields from this subset using the _source_excludes query + * parameter. If the _source parameter is false, this + * parameter is ignored. *

* API name: {@code _source_includes} *

@@ -363,7 +462,11 @@ public final Builder sourceIncludes(List list) { } /** - * A comma-separated list of source fields to include in the response. + * A comma-separated list of source fields to include in the response. If this + * parameter is specified, only these source fields are returned. You can + * exclude fields from this subset using the _source_excludes query + * parameter. If the _source parameter is false, this + * parameter is ignored. *

* API name: {@code _source_includes} *

@@ -375,10 +478,10 @@ public final Builder sourceIncludes(String value, String... values) { } /** - * Should this request force synthetic _source? Use this to test if the mapping - * supports synthetic _source and to get a sense of the worst case performance. - * Fetches with this enabled will be slower the enabling synthetic source - * natively in the index. + * Indicates whether the request forces synthetic _source. Use this + * parameter to test if the mapping supports synthetic _source and + * to get a sense of the worst case performance. Fetches with this parameter + * enabled will be slower than enabling synthetic source natively in the index. *

* API name: {@code force_synthetic_source} */ @@ -388,7 +491,7 @@ public final Builder forceSyntheticSource(@Nullable Boolean value) { } /** - * Required - Unique identifier of the document. + * Required - A unique document identifier. *

* API name: {@code id} */ @@ -398,7 +501,7 @@ public final Builder id(String value) { } /** - * Required - Name of the index that contains the document. + * Required - The name of the index that contains the document. *

* API name: {@code index} */ @@ -408,8 +511,15 @@ public final Builder index(String value) { } /** - * Specifies the node or shard the operation should be performed on. Random by - * default. + * The node or shard the operation should be performed on. By default, the + * operation is randomized between the shard replicas. + *

+ * If it is set to _local, the operation will prefer to be run on a + * local allocated shard when possible. If it is set to a custom value, the + * value is used to guarantee that the same shards will be used for the same + * custom value. This can help with "jumping values" when hitting + * different shards in different refresh states. A sample value can be something + * like the web session ID or the user name. *

* API name: {@code preference} */ @@ -429,8 +539,10 @@ public final Builder realtime(@Nullable Boolean value) { } /** - * If true, Elasticsearch refreshes the affected shards to make this operation - * visible to search. If false, do nothing with refreshes. + * If true, the request refreshes the relevant shards before + * retrieving the document. Setting it to true should be done after + * careful thought and verification that this does not cause a heavy load on the + * system (and slow down indexing). *

* API name: {@code refresh} */ @@ -440,7 +552,7 @@ public final Builder refresh(@Nullable Boolean value) { } /** - * Target the specified primary shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -450,9 +562,12 @@ public final Builder routing(@Nullable String value) { } /** - * List of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, - * the _source parameter defaults to false. + * A comma-separated list of stored fields to return as part of a hit. If no + * fields are specified, no stored fields are included in the response. If this + * field is specified, the _source parameter defaults to + * false. Only leaf fields can be retrieved with the + * stored_field option. Object fields can't be returned; if + * specified, the request fails. *

* API name: {@code stored_fields} *

@@ -464,9 +579,12 @@ public final Builder storedFields(List list) { } /** - * List of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, - * the _source parameter defaults to false. + * A comma-separated list of stored fields to return as part of a hit. If no + * fields are specified, no stored fields are included in the response. If this + * field is specified, the _source parameter defaults to + * false. Only leaf fields can be retrieved with the + * stored_field option. Object fields can't be returned; if + * specified, the request fails. *

* API name: {@code stored_fields} *

@@ -478,8 +596,8 @@ public final Builder storedFields(String value, String... values) { } /** - * Explicit version number for concurrency control. The specified version must - * match the current version of the document for the request to succeed. + * The version number for concurrency control. It must match the current version + * of the document for the request to succeed. *

* API name: {@code version} */ @@ -489,7 +607,7 @@ public final Builder version(@Nullable Long value) { } /** - * Specific version type: internal, external, external_gte. + * The version type. *

* API name: {@code version_type} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetScriptRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetScriptRequest.java index e1682f23b..77b3dc6cd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetScriptRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetScriptRequest.java @@ -83,7 +83,7 @@ public static GetScriptRequest of(Function * API name: {@code id} */ @@ -92,7 +92,10 @@ public final String id() { } /** - * Specify timeout for connection to master + * The period to wait for the master node. If the master node is not available + * before the timeout expires, the request fails and returns an error. It can + * also be set to -1 to indicate that the request should never + * timeout. *

* API name: {@code master_timeout} */ @@ -116,7 +119,7 @@ public static class Builder extends RequestBase.AbstractBuilder private Time masterTimeout; /** - * Required - Identifier for the stored script or search template. + * Required - The identifier for the stored script or search template. *

* API name: {@code id} */ @@ -126,7 +129,10 @@ public final Builder id(String value) { } /** - * Specify timeout for connection to master + * The period to wait for the master node. If the master node is not available + * before the timeout expires, the request fails and returns an error. It can + * also be set to -1 to indicate that the request should never + * timeout. *

* API name: {@code master_timeout} */ @@ -136,7 +142,10 @@ public final Builder masterTimeout(@Nullable Time value) { } /** - * Specify timeout for connection to master + * The period to wait for the master node. If the master node is not available + * before the timeout expires, the request fails and returns an error. It can + * also be set to -1 to indicate that the request should never + * timeout. *

* API name: {@code master_timeout} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetSourceRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetSourceRequest.java index 2e59164b3..261181e5d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetSourceRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetSourceRequest.java @@ -61,8 +61,23 @@ // typedef: _global.get_source.Request /** - * Get a document's source. Returns the source of a document. + * Get a document's source. + *

+ * Get the source of a document. For example: * + *

+ * GET my-index-000001/_source/1
+ * 
+ * 
+ *

+ * You can use the source filtering parameters to control which parts of the + * _source are returned: + * + *

+ * GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+ * 
+ * 
+ * * @see API * specification */ @@ -123,8 +138,8 @@ public static GetSourceRequest of(Function_source field (true + * or false) or lists the fields to return. *

* API name: {@code _source} */ @@ -152,7 +167,7 @@ public final List sourceIncludes() { } /** - * Required - Unique identifier of the document. + * Required - A unique document identifier. *

* API name: {@code id} */ @@ -161,7 +176,7 @@ public final String id() { } /** - * Required - Name of the index that contains the document. + * Required - The name of the index that contains the document. *

* API name: {@code index} */ @@ -170,8 +185,8 @@ public final String index() { } /** - * Specifies the node or shard the operation should be performed on. Random by - * default. + * The node or shard the operation should be performed on. By default, the + * operation is randomized between the shard replicas. *

* API name: {@code preference} */ @@ -181,7 +196,7 @@ public final String preference() { } /** - * Boolean) If true, the request is real-time as opposed to near-real-time. + * If true, the request is real-time as opposed to near-real-time. *

* API name: {@code realtime} */ @@ -191,8 +206,10 @@ public final Boolean realtime() { } /** - * If true, Elasticsearch refreshes the affected shards to make this operation - * visible to search. If false, do nothing with refreshes. + * If true, the request refreshes the relevant shards before + * retrieving the document. Setting it to true should be done after + * careful thought and verification that this does not cause a heavy load on the + * system (and slow down indexing). *

* API name: {@code refresh} */ @@ -202,7 +219,7 @@ public final Boolean refresh() { } /** - * Target the specified primary shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -212,6 +229,8 @@ public final String routing() { } /** + * A comma-separated list of stored fields to return as part of a hit. + *

* API name: {@code stored_fields} */ public final List storedFields() { @@ -219,8 +238,8 @@ public final List storedFields() { } /** - * Explicit version number for concurrency control. The specified version must - * match the current version of the document for the request to succeed. + * The version number for concurrency control. It must match the current version + * of the document for the request to succeed. *

* API name: {@code version} */ @@ -230,7 +249,7 @@ public final Long version() { } /** - * Specific version type: internal, external, external_gte. + * The version type. *

* API name: {@code version_type} */ @@ -283,8 +302,8 @@ public static class Builder extends RequestBase.AbstractBuilder private VersionType versionType; /** - * True or false to return the _source field or not, or a list of fields to - * return. + * Indicates whether to return the _source field (true + * or false) or lists the fields to return. *

* API name: {@code _source} */ @@ -294,8 +313,8 @@ public final Builder source(@Nullable SourceConfigParam value) { } /** - * True or false to return the _source field or not, or a list of fields to - * return. + * Indicates whether to return the _source field (true + * or false) or lists the fields to return. *

* API name: {@code _source} */ @@ -352,7 +371,7 @@ public final Builder sourceIncludes(String value, String... values) { } /** - * Required - Unique identifier of the document. + * Required - A unique document identifier. *

* API name: {@code id} */ @@ -362,7 +381,7 @@ public final Builder id(String value) { } /** - * Required - Name of the index that contains the document. + * Required - The name of the index that contains the document. *

* API name: {@code index} */ @@ -372,8 +391,8 @@ public final Builder index(String value) { } /** - * Specifies the node or shard the operation should be performed on. Random by - * default. + * The node or shard the operation should be performed on. By default, the + * operation is randomized between the shard replicas. *

* API name: {@code preference} */ @@ -383,7 +402,7 @@ public final Builder preference(@Nullable String value) { } /** - * Boolean) If true, the request is real-time as opposed to near-real-time. + * If true, the request is real-time as opposed to near-real-time. *

* API name: {@code realtime} */ @@ -393,8 +412,10 @@ public final Builder realtime(@Nullable Boolean value) { } /** - * If true, Elasticsearch refreshes the affected shards to make this operation - * visible to search. If false, do nothing with refreshes. + * If true, the request refreshes the relevant shards before + * retrieving the document. Setting it to true should be done after + * careful thought and verification that this does not cause a heavy load on the + * system (and slow down indexing). *

* API name: {@code refresh} */ @@ -404,7 +425,7 @@ public final Builder refresh(@Nullable Boolean value) { } /** - * Target the specified primary shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -414,6 +435,8 @@ public final Builder routing(@Nullable String value) { } /** + * A comma-separated list of stored fields to return as part of a hit. + *

* API name: {@code stored_fields} *

* Adds all elements of list to storedFields. @@ -424,6 +447,8 @@ public final Builder storedFields(List list) { } /** + * A comma-separated list of stored fields to return as part of a hit. + *

* API name: {@code stored_fields} *

* Adds one or more values to storedFields. @@ -434,8 +459,8 @@ public final Builder storedFields(String value, String... values) { } /** - * Explicit version number for concurrency control. The specified version must - * match the current version of the document for the request to succeed. + * The version number for concurrency control. It must match the current version + * of the document for the request to succeed. *

* API name: {@code version} */ @@ -445,7 +470,7 @@ public final Builder version(@Nullable Long value) { } /** - * Specific version type: internal, external, external_gte. + * The version type. *

* API name: {@code version_type} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/IndexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/IndexRequest.java index 34542789c..fa429a5c3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/IndexRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/IndexRequest.java @@ -69,10 +69,196 @@ // typedef: _global.index.Request /** - * Index a document. Adds a JSON document to the specified data stream or index - * and makes it searchable. If the target is an index and the document already - * exists, the request updates the document and increments its version. + * Create or update a document in an index. + *

+ * Add a JSON document to the specified data stream or index and make it + * searchable. If the target is an index and the document already exists, the + * request updates the document and increments its version. + *

+ * NOTE: You cannot use this API to send update requests for existing documents + * in a data stream. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or index alias: + *

    + *
  • To add or overwrite a document using the + * PUT /<target>/_doc/<_id> request format, you must + * have the create, index, or write index + * privilege.
  • + *
  • To add a document using the POST /<target>/_doc/ + * request format, you must have the create_doc, + * create, index, or write index + * privilege.
  • + *
  • To automatically create a data stream or index with this API request, you + * must have the auto_configure, create_index, or + * manage index privilege.
  • + *
+ *

+ * Automatic data stream creation requires a matching index template with data + * stream enabled. + *

+ * NOTE: Replica shards might not all be started when an indexing operation + * returns successfully. By default, only the primary is required. Set + * wait_for_active_shards to change this default behavior. + *

+ * Automatically create data streams and indices + *

+ * If the request's target doesn't exist and matches an index template with a + * data_stream definition, the index operation automatically + * creates the data stream. + *

+ * If the target doesn't exist and doesn't match a data stream template, the + * operation automatically creates the index and applies any matching index + * templates. + *

+ * NOTE: Elasticsearch includes several built-in index templates. To avoid + * naming collisions with these templates, refer to index pattern documentation. + *

+ * If no mapping exists, the index operation creates a dynamic mapping. By + * default, new fields and objects are automatically added to the mapping if + * needed. + *

+ * Automatic index creation is controlled by the + * action.auto_create_index setting. If it is true, + * any index can be created automatically. You can modify this setting to + * explicitly allow or block automatic creation of indices that match specified + * patterns or set it to false to turn off automatic index creation + * entirely. Specify a comma-separated list of patterns you want to allow or + * prefix each pattern with + or - to indicate whether + * it should be allowed or blocked. When a list is specified, the default + * behaviour is to disallow. + *

+ * NOTE: The action.auto_create_index setting affects the automatic + * creation of indices only. It does not affect the creation of data streams. + *

+ * Optimistic concurrency control + *

+ * Index operations can be made conditional and only be performed if the last + * modification to the document was assigned the sequence number and primary + * term specified by the if_seq_no and if_primary_term + * parameters. If a mismatch is detected, the operation will result in a + * VersionConflictException and a status code of 409. + *

+ * Routing + *

+ * By default, shard placement — or routing — is controlled by using a hash of + * the document's ID value. For more explicit control, the value fed into the + * hash function used by the router can be directly specified on a per-operation + * basis using the routing parameter. + *

+ * When setting up explicit mapping, you can also use the _routing + * field to direct the index operation to extract the routing value from the + * document itself. This does come at the (very minimal) cost of an additional + * document parsing pass. If the _routing mapping is defined and + * set to be required, the index operation will fail if no routing value is + * provided or extracted. + *

+ * NOTE: Data streams do not support custom routing unless they were created + * with the allow_custom_routing setting enabled in the template. + *

+ * Distributed + *

+ * The index operation is directed to the primary shard based on its route and + * performed on the actual node containing this shard. After the primary shard + * completes the operation, if needed, the update is distributed to applicable + * replicas. + *

+ * Active shards + *

+ * To improve the resiliency of writes to the system, indexing operations can be + * configured to wait for a certain number of active shard copies before + * proceeding with the operation. If the requisite number of active shard copies + * are not available, then the write operation must wait and retry, until either + * the requisite shard copies have started or a timeout occurs. By default, + * write operations only wait for the primary shards to be active before + * proceeding (that is to say wait_for_active_shards is + * 1). This default can be overridden in the index settings + * dynamically by setting index.write.wait_for_active_shards. To + * alter this behavior per operation, use the + * wait_for_active_shards request parameter. + *

+ * Valid values are all or any positive integer up to the total number of + * configured copies per shard in the index (which is + * number_of_replicas+1). Specifying a negative value or a number + * greater than the number of shard copies will throw an error. + *

+ * For example, suppose you have a cluster of three nodes, A, B, and C and you + * create an index index with the number of replicas set to 3 (resulting in 4 + * shard copies, one more copy than there are nodes). If you attempt an indexing + * operation, by default the operation will only ensure the primary copy of each + * shard is available before proceeding. This means that even if B and C went + * down and A hosted the primary shard copies, the indexing operation would + * still proceed with only one copy of the data. If + * wait_for_active_shards is set on the request to 3 + * (and all three nodes are up), the indexing operation will require 3 active + * shard copies before proceeding. This requirement should be met because there + * are 3 active nodes in the cluster, each one holding a copy of the shard. + * However, if you set wait_for_active_shards to all + * (or to 4, which is the same in this situation), the indexing + * operation will not proceed as you do not have all 4 copies of each shard + * active in the index. The operation will timeout unless a new node is brought + * up in the cluster to host the fourth copy of the shard. + *

+ * It is important to note that this setting greatly reduces the chances of the + * write operation not writing to the requisite number of shard copies, but it + * does not completely eliminate the possibility, because this check occurs + * before the write operation starts. After the write operation is underway, it + * is still possible for replication to fail on any number of shard copies but + * still succeed on the primary. The _shards section of the API + * response reveals the number of shard copies on which replication succeeded + * and failed. + *

+ * No operation (noop) updates + *

+ * When updating a document by using this API, a new version of the document is
+ * always created even if the document hasn't changed. If this isn't acceptable,
+ * use the _update API with detect_noop set to
+ * true. The detect_noop option isn't available on
+ * this API because it doesn't fetch the old source and isn't able to compare it
+ * against the new source.
+ *

+ * There isn't a definitive rule for when noop updates aren't acceptable. It's a + * combination of lots of factors like how frequently your data source sends + * updates that are actually noops and how many queries per second Elasticsearch + * runs on the shard receiving the updates. + *

+ * Versioning + *

+ * Each indexed document is given a version number. By default, internal + * versioning is used that starts at 1 and increments with each update, deletes + * included. Optionally, the version number can be set to an external value (for + * example, if maintained in a database). To enable this functionality, + * version_type should be set to external. The value + * provided must be a numeric, long value greater than or equal to 0, and less + * than around 9.2e+18. + *

+ * NOTE: Versioning is completely real time, and is not affected by the near + * real time aspects of search operations. If no version is provided, the + * operation runs without any version checks. + *

+ * When using the external version type, the system checks to see if the version + * number passed to the index request is greater than the version of the + * currently stored document. If true, the document will be indexed and the new + * version number used. If the value provided is less than or equal to the + * stored document's version number, a version conflict will occur and the index + * operation will fail. For example: * + *

+ * PUT my-index-000001/_doc/1?version=2&version_type=external
+ * {
+ *   "user": {
+ *     "id": "elkbee"
+ *   }
+ * }
+ *
+ * In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
+ * If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
+ *
+ * A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
+ * Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
+ * 
+ * 
+ * * @see API * specification */ @@ -149,7 +335,9 @@ public static IndexRequest of( } /** - * Unique identifier for the document. + * A unique identifier for the document. To automatically generate a document + * ID, use the POST /<target>/_doc/ request format and omit + * this parameter. *

* API name: {@code id} */ @@ -179,7 +367,12 @@ public final Long ifSeqNo() { } /** - * Required - Name of the data stream or index to target. + * Required - The name of the data stream or index to target. If the target + * doesn't exist and matches the name or wildcard (*) pattern of an + * index template with a data_stream definition, this request + * creates the data stream. If the target doesn't exist and doesn't match a data + * stream template, this request creates the index. You can check for existing + * targets with the resolve index API. *

 * API name: {@code index}
	 */
@@ -188,13 +381,13 @@ public final String index() {
	}

	/**
-	 * Set to create to only index the document if it does not already exist (put if
-	 * absent). If a document with the specified _id already exists,
-	 * the indexing operation will fail. Same as using the
-	 * <index>/_create endpoint. Valid values:
-	 * index, create. If document id is specified, it
-	 * defaults to index. Otherwise, it defaults to
-	 * create.
+	 * Set to create to only index the document if it does not already
+	 * exist (put if absent). If a document with the specified _id
+	 * already exists, the indexing operation will fail. The behavior is the same as
+	 * using the <index>/_create endpoint. If a document ID is
+	 * specified, this parameter defaults to index. Otherwise, it
+	 * defaults to create. If the request targets a data stream, an
+	 * op_type of create is required.
	 *

* API name: {@code op_type} */ @@ -204,8 +397,8 @@ public final OpType opType() { } /** - * ID of the pipeline to use to preprocess incoming documents. If the index has - * a default ingest pipeline specified, then setting the value to + * The ID of the pipeline to use to preprocess incoming documents. If the index + * has a default ingest pipeline specified, then setting the value to * _none disables the default ingest pipeline for this request. If * a final pipeline is configured it will always run, regardless of the value of * this parameter. @@ -219,10 +412,9 @@ public final String pipeline() { /** * If true, Elasticsearch refreshes the affected shards to make - * this operation visible to search, if wait_for then wait for a - * refresh to make this operation visible to search, if false do - * nothing with refreshes. Valid values: true, false, - * wait_for. + * this operation visible to search. If wait_for, it waits for a + * refresh to make this operation visible to search. If false, it + * does nothing with refreshes. *

* API name: {@code refresh} */ @@ -242,7 +434,7 @@ public final Boolean requireAlias() { } /** - * Custom value used to route operations to a specific shard. + * A custom value that is used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -252,9 +444,17 @@ public final String routing() { } /** - * Period the request waits for the following operations: automatic index + * The period the request waits for the following operations: automatic index * creation, dynamic mapping updates, waiting for active shards. *

+ * This parameter is useful for situations where the primary shard assigned to + * perform the operation might not be available when the operation runs. Some + * reasons for this might be that the primary shard is currently recovering from + * a gateway or undergoing relocation. By default, the operation will wait on + * the primary shard to become available for at least 1 minute before failing + * and responding with an error. The actual wait time could be longer, + * particularly when multiple waits occur. + *

* API name: {@code timeout} */ @Nullable @@ -263,8 +463,8 @@ public final Time timeout() { } /** - * Explicit version number for concurrency control. The specified version must - * match the current version of the document for the request to succeed. + * An explicit version number for concurrency control. It must be a non-negative + * long number. *

* API name: {@code version} */ @@ -274,7 +474,7 @@ public final Long version() { } /** - * Specific version type: external, external_gte. + * The version type. *

* API name: {@code version_type} */ @@ -285,8 +485,10 @@ public final VersionType versionType() { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total number of - * shards in the index (number_of_replicas+1). + * operation. You can set it to all or any positive integer up to + * the total number of shards in the index (number_of_replicas+1). + * The default value of 1 means it waits for each primary shard to + * be active. *

* API name: {@code wait_for_active_shards} */ @@ -363,7 +565,9 @@ public static class Builder extends RequestBase.AbstractBuilder tDocumentSerializer; /** - * Unique identifier for the document. + * A unique identifier for the document. To automatically generate a document + * ID, use the POST /<target>/_doc/ request format and omit + * this parameter. *

* API name: {@code id} */ @@ -393,7 +597,12 @@ public final Builder ifSeqNo(@Nullable Long value) { } /** - * Required - Name of the data stream or index to target. + * Required - The name of the data stream or index to target. If the target + * doesn't exist and matches the name or wildcard (*) pattern of an + * index template with a data_stream definition, this request + * creates the data stream. If the target doesn't exist and doesn't match a data + * stream template, this request creates the index. You can check for existing + * targets with the resolve index API. *

 * API name: {@code index}
	 */
@@ -403,13 +612,13 @@ public final Builder index(String value) {
	}

	/**
-	 * Set to create to only index the document if it does not already exist (put if
-	 * absent). If a document with the specified _id already exists,
-	 * the indexing operation will fail. Same as using the
-	 * <index>/_create endpoint. Valid values:
-	 * index, create. If document id is specified, it
-	 * defaults to index. Otherwise, it defaults to
-	 * create.
+	 * Set to create to only index the document if it does not already
+	 * exist (put if absent). If a document with the specified _id
+	 * already exists, the indexing operation will fail. The behavior is the same as
+	 * using the <index>/_create endpoint. If a document ID is
+	 * specified, this parameter defaults to index. Otherwise, it
+	 * defaults to create. If the request targets a data stream, an
+	 * op_type of create is required.
	 *

* API name: {@code op_type} */ @@ -419,8 +628,8 @@ public final Builder opType(@Nullable OpType value) { } /** - * ID of the pipeline to use to preprocess incoming documents. If the index has - * a default ingest pipeline specified, then setting the value to + * The ID of the pipeline to use to preprocess incoming documents. If the index + * has a default ingest pipeline specified, then setting the value to * _none disables the default ingest pipeline for this request. If * a final pipeline is configured it will always run, regardless of the value of * this parameter. @@ -434,10 +643,9 @@ public final Builder pipeline(@Nullable String value) { /** * If true, Elasticsearch refreshes the affected shards to make - * this operation visible to search, if wait_for then wait for a - * refresh to make this operation visible to search, if false do - * nothing with refreshes. Valid values: true, false, - * wait_for. + * this operation visible to search. If wait_for, it waits for a + * refresh to make this operation visible to search. If false, it + * does nothing with refreshes. *

* API name: {@code refresh} */ @@ -457,7 +665,7 @@ public final Builder requireAlias(@Nullable Boolean value) { } /** - * Custom value used to route operations to a specific shard. + * A custom value that is used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -467,9 +675,17 @@ public final Builder routing(@Nullable String value) { } /** - * Period the request waits for the following operations: automatic index + * The period the request waits for the following operations: automatic index * creation, dynamic mapping updates, waiting for active shards. *

+ * This parameter is useful for situations where the primary shard assigned to + * perform the operation might not be available when the operation runs. Some + * reasons for this might be that the primary shard is currently recovering from + * a gateway or undergoing relocation. By default, the operation will wait on + * the primary shard to become available for at least 1 minute before failing + * and responding with an error. The actual wait time could be longer, + * particularly when multiple waits occur. + *

* API name: {@code timeout} */ public final Builder timeout(@Nullable Time value) { @@ -478,9 +694,17 @@ public final Builder timeout(@Nullable Time value) { } /** - * Period the request waits for the following operations: automatic index + * The period the request waits for the following operations: automatic index * creation, dynamic mapping updates, waiting for active shards. *

+ * This parameter is useful for situations where the primary shard assigned to + * perform the operation might not be available when the operation runs. Some + * reasons for this might be that the primary shard is currently recovering from + * a gateway or undergoing relocation. By default, the operation will wait on + * the primary shard to become available for at least 1 minute before failing + * and responding with an error. The actual wait time could be longer, + * particularly when multiple waits occur. + *

* API name: {@code timeout} */ public final Builder timeout(Function> fn) { @@ -488,8 +712,8 @@ public final Builder timeout(Function * API name: {@code version} */ @@ -499,7 +723,7 @@ public final Builder version(@Nullable Long value) { } /** - * Specific version type: external, external_gte. + * The version type. *

* API name: {@code version_type} */ @@ -510,8 +734,10 @@ public final Builder versionType(@Nullable VersionType value) { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total number of - * shards in the index (number_of_replicas+1). + * operation. You can set it to all or any positive integer up to + * the total number of shards in the index (number_of_replicas+1). + * The default value of 1 means it waits for each primary shard to + * be active. *

* API name: {@code wait_for_active_shards} */ @@ -522,8 +748,10 @@ public final Builder waitForActiveShards(@Nullable WaitForActiveShard /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total number of - * shards in the index (number_of_replicas+1). + * operation. You can set it to all or any positive integer up to + * the total number of shards in the index (number_of_replicas+1). + * The default value of 1 means it waits for each primary shard to + * be active. *

* API name: {@code wait_for_active_shards} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/KnnSearchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/KnnSearchRequest.java index de8f79bee..2598304c7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/KnnSearchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/KnnSearchRequest.java @@ -79,7 +79,19 @@ *

* The kNN search API supports restricting the search using a filter. The search * will return the top k documents that also match the filter query. - * + *

+ * A kNN search response has the exact same structure as a search API response. + * However, certain sections have a meaning specific to kNN search: + *

    + *
  • The document _score is determined by the similarity between + * the query and document vector.
  • + *
  • The hits.total object contains the total number of nearest + * neighbor candidates considered, which is + * num_candidates * num_shards. The + * hits.total.relation will always be eq, indicating + * an exact value.
  • + *
+ * * @see API * specification * @deprecated 8.4.0 The kNN search API has been replaced by the @@ -127,7 +139,8 @@ public static KnnSearchRequest of(Functionhits._source property of the search + * response. *

* API name: {@code _source} */ @@ -138,7 +151,8 @@ public final SourceConfig source() { /** * The request returns doc values for field names matching these patterns in the - * hits.fields property of the response. Accepts wildcard (*) patterns. + * hits.fields property of the response. It accepts wildcard + * (*) patterns. *

* API name: {@code docvalue_fields} */ @@ -148,7 +162,8 @@ public final List docvalueFields() { /** * The request returns values for field names matching these patterns in the - * hits.fields property of the response. Accepts wildcard (*) patterns. + * hits.fields property of the response. It accepts wildcard + * (*) patterns. *

* API name: {@code fields} */ @@ -157,10 +172,10 @@ public final List fields() { } /** - * Query to filter the documents that can match. The kNN search will return the - * top k documents that also match this filter. The value can be a - * single query or a list of queries. If filter isn't provided, all - * documents are allowed to match. + * A query to filter the documents that can match. The kNN search will return + * the top k documents that also match this filter. The value can + * be a single query or a list of queries. If filter isn't + * provided, all documents are allowed to match. *

* API name: {@code filter} */ @@ -170,7 +185,7 @@ public final List filter() { /** * Required - A comma-separated list of index names to search; use - * _all or to perform the operation on all indices + * _all or to perform the operation on all indices. *

* API name: {@code index} */ @@ -179,7 +194,7 @@ public final List index() { } /** - * Required - kNN query to execute + * Required - The kNN query to run. *

* API name: {@code knn} */ @@ -188,7 +203,7 @@ public final KnnSearchQuery knn() { } /** - * A comma-separated list of specific routing values + * A comma-separated list of specific routing values. *

* API name: {@code routing} */ @@ -198,10 +213,11 @@ public final String routing() { } /** - * List of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, - * the _source parameter defaults to false. You can pass _source: true to return - * both source fields and stored fields in the search response. + * A list of stored fields to return as part of a hit. If no fields are + * specified, no stored fields are included in the response. If this field is + * specified, the _source parameter defaults to false. + * You can pass _source: true to return both source fields and + * stored fields in the search response. *

* API name: {@code stored_fields} */ @@ -310,7 +326,8 @@ public static class Builder extends RequestBase.AbstractBuilder /** * Indicates which source fields are returned for matching documents. These - * fields are returned in the hits._source property of the search response. + * fields are returned in the hits._source property of the search + * response. *

* API name: {@code _source} */ @@ -321,7 +338,8 @@ public final Builder source(@Nullable SourceConfig value) { /** * Indicates which source fields are returned for matching documents. These - * fields are returned in the hits._source property of the search response. + * fields are returned in the hits._source property of the search + * response. *

* API name: {@code _source} */ @@ -331,7 +349,8 @@ public final Builder source(Functionhits.fields property of the response. It accepts wildcard + * (*) patterns. *

* API name: {@code docvalue_fields} *

@@ -344,7 +363,8 @@ public final Builder docvalueFields(List list) { /** * The request returns doc values for field names matching these patterns in the - * hits.fields property of the response. Accepts wildcard (*) patterns. + * hits.fields property of the response. It accepts wildcard + * (*) patterns. *

* API name: {@code docvalue_fields} *

@@ -357,7 +377,8 @@ public final Builder docvalueFields(FieldAndFormat value, FieldAndFormat... valu /** * The request returns doc values for field names matching these patterns in the - * hits.fields property of the response. Accepts wildcard (*) patterns. + * hits.fields property of the response. It accepts wildcard + * (*) patterns. *

* API name: {@code docvalue_fields} *

@@ -369,7 +390,8 @@ public final Builder docvalueFields(Functionhits.fields property of the response. It accepts wildcard + * (*) patterns. *

* API name: {@code fields} *

@@ -382,7 +404,8 @@ public final Builder fields(List list) { /** * The request returns values for field names matching these patterns in the - * hits.fields property of the response. Accepts wildcard (*) patterns. + * hits.fields property of the response. It accepts wildcard + * (*) patterns. *

* API name: {@code fields} *

@@ -394,10 +417,10 @@ public final Builder fields(String value, String... values) { } /** - * Query to filter the documents that can match. The kNN search will return the - * top k documents that also match this filter. The value can be a - * single query or a list of queries. If filter isn't provided, all - * documents are allowed to match. + * A query to filter the documents that can match. The kNN search will return + * the top k documents that also match this filter. The value can + * be a single query or a list of queries. If filter isn't + * provided, all documents are allowed to match. *

* API name: {@code filter} *

@@ -409,10 +432,10 @@ public final Builder filter(List list) { } /** - * Query to filter the documents that can match. The kNN search will return the - * top k documents that also match this filter. The value can be a - * single query or a list of queries. If filter isn't provided, all - * documents are allowed to match. + * A query to filter the documents that can match. The kNN search will return + * the top k documents that also match this filter. The value can + * be a single query or a list of queries. If filter isn't + * provided, all documents are allowed to match. *

* API name: {@code filter} *

@@ -424,10 +447,10 @@ public final Builder filter(Query value, Query... values) { } /** - * Query to filter the documents that can match. The kNN search will return the - * top k documents that also match this filter. The value can be a - * single query or a list of queries. If filter isn't provided, all - * documents are allowed to match. + * A query to filter the documents that can match. The kNN search will return + * the top k documents that also match this filter. The value can + * be a single query or a list of queries. If filter isn't + * provided, all documents are allowed to match. *

* API name: {@code filter} *

@@ -439,7 +462,7 @@ public final Builder filter(Function> fn) { /** * Required - A comma-separated list of index names to search; use - * _all or to perform the operation on all indices + * _all or to perform the operation on all indices. *

* API name: {@code index} *

@@ -452,7 +475,7 @@ public final Builder index(List list) { /** * Required - A comma-separated list of index names to search; use - * _all or to perform the operation on all indices + * _all or to perform the operation on all indices. *

* API name: {@code index} *

@@ -464,7 +487,7 @@ public final Builder index(String value, String... values) { } /** - * Required - kNN query to execute + * Required - The kNN query to run. *

* API name: {@code knn} */ @@ -474,7 +497,7 @@ public final Builder knn(KnnSearchQuery value) { } /** - * Required - kNN query to execute + * Required - The kNN query to run. *

* API name: {@code knn} */ @@ -483,7 +506,7 @@ public final Builder knn(Function * API name: {@code routing} */ @@ -493,10 +516,11 @@ public final Builder routing(@Nullable String value) { } /** - * List of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, - * the _source parameter defaults to false. You can pass _source: true to return - * both source fields and stored fields in the search response. + * A list of stored fields to return as part of a hit. If no fields are + * specified, no stored fields are included in the response. If this field is + * specified, the _source parameter defaults to false. + * You can pass _source: true to return both source fields and + * stored fields in the search response. *

* API name: {@code stored_fields} *

@@ -508,10 +532,11 @@ public final Builder storedFields(List list) { } /** - * List of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, - * the _source parameter defaults to false. You can pass _source: true to return - * both source fields and stored fields in the search response. + * A list of stored fields to return as part of a hit. If no fields are + * specified, no stored fields are included in the response. If this field is + * specified, the _source parameter defaults to false. + * You can pass _source: true to return both source fields and + * stored fields in the search response. *

* API name: {@code stored_fields} *

diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/KnnSearchResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/KnnSearchResponse.java index 38218dc76..c61d8c043 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/KnnSearchResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/KnnSearchResponse.java @@ -105,7 +105,7 @@ public static KnnSearchResponse of( } /** - * Required - Milliseconds it took Elasticsearch to execute the request. + * Required - The milliseconds it took Elasticsearch to run the request. *

* API name: {@code took} */ @@ -124,7 +124,7 @@ public final boolean timedOut() { } /** - * Required - Contains a count of shards used for the request. + * Required - A count of shards used for the request. *

* API name: {@code _shards} */ @@ -133,7 +133,7 @@ public final ShardStatistics shards() { } /** - * Required - Contains returned documents and metadata. + * Required - The returned documents and metadata. *

* API name: {@code hits} */ @@ -142,8 +142,8 @@ public final HitsMetadata hits() { } /** - * Contains field values for the documents. These fields must be specified in - * the request using the fields parameter. + * The field values for the documents. These fields must be specified in the + * request using the fields parameter. *

* API name: {@code fields} */ @@ -152,8 +152,8 @@ public final Map fields() { } /** - * Highest returned document score. This value is null for requests that do not - * sort by score. + * The highest returned document score. This value is null for requests that do + * not sort by score. *

* API name: {@code max_score} */ @@ -236,7 +236,7 @@ public static class Builder extends WithJsonObjectBuilderBase tDocumentSerializer; /** - * Required - Milliseconds it took Elasticsearch to execute the request. + * Required - The milliseconds it took Elasticsearch to run the request. *

* API name: {@code took} */ @@ -257,7 +257,7 @@ public final Builder timedOut(boolean value) { } /** - * Required - Contains a count of shards used for the request. + * Required - A count of shards used for the request. *

* API name: {@code _shards} */ @@ -267,7 +267,7 @@ public final Builder shards(ShardStatistics value) { } /** - * Required - Contains a count of shards used for the request. + * Required - A count of shards used for the request. *

* API name: {@code _shards} */ @@ -276,7 +276,7 @@ public final Builder shards(Function * API name: {@code hits} */ @@ -286,7 +286,7 @@ public final Builder hits(HitsMetadata value) { } /** - * Required - Contains returned documents and metadata. + * Required - The returned documents and metadata. *

* API name: {@code hits} */ @@ -296,8 +296,8 @@ public final Builder hits( } /** - * Contains field values for the documents. These fields must be specified in - * the request using the fields parameter. + * The field values for the documents. These fields must be specified in the + * request using the fields parameter. *

* API name: {@code fields} *

@@ -309,8 +309,8 @@ public final Builder fields(Map map) { } /** - * Contains field values for the documents. These fields must be specified in - * the request using the fields parameter. + * The field values for the documents. These fields must be specified in the + * request using the fields parameter. *

* API name: {@code fields} *

@@ -322,8 +322,8 @@ public final Builder fields(String key, JsonData value) { } /** - * Highest returned document score. This value is null for requests that do not - * sort by score. + * The highest returned document score. This value is null for requests that do + * not sort by score. *

* API name: {@code max_score} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MgetRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MgetRequest.java index 28297fcd9..d95346178 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MgetRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MgetRequest.java @@ -68,6 +68,24 @@ * index in the request URI, you only need to specify the document IDs in the * request body. To ensure fast responses, this multi get (mget) API responds * with partial results if one or more shards fail. + *

+ * Filter source fields + *

+ * By default, the _source field is returned for every document (if + * stored). Use the _source and _source_include or + * _source_exclude attributes to filter what fields are returned for + * a particular document. You can include the _source, + * _source_includes, and _source_excludes query + * parameters in the request URI to specify the defaults to use when there are + * no per-document instructions. + *

+ * Get stored fields + *

+ * Use the stored_fields attribute to specify the set of stored + * fields you want to retrieve. Any requested fields that are not stored are + * ignored. You can include the stored_fields query parameter in + * the request URI to specify the defaults to use when there are no per-document + * instructions. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MgetResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MgetResponse.java index e70bce65f..fd04cb461 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MgetResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MgetResponse.java @@ -83,7 +83,12 @@ public static MgetResponse of( } /** - * Required - API name: {@code docs} + * Required - The response includes a docs array that contains the documents in + * the order specified in the request. The structure of the returned documents + * is similar to that returned by the get API. If there is a failure getting a + * particular document, the error is included in place of the document. + *

+ * API name: {@code docs} */ public final List> docs() { return this.docs; @@ -133,7 +138,12 @@ public static class Builder extends WithJsonObjectBuilderBase tDocumentSerializer; /** - * Required - API name: {@code docs} + * Required - The response includes a docs array that contains the documents in + * the order specified in the request. The structure of the returned documents + * is similar to that returned by the get API. If there is a failure getting a + * particular document, the error is included in place of the document. + *

+ * API name: {@code docs} *

* Adds all elements of list to docs. */ @@ -143,7 +153,12 @@ public final Builder docs(List> list) } /** - * Required - API name: {@code docs} + * Required - The response includes a docs array that contains the documents in + * the order specified in the request. The structure of the returned documents + * is similar to that returned by the get API. If there is a failure getting a + * particular document, the error is included in place of the document. + *

+ * API name: {@code docs} *

* Adds one or more values to docs. */ @@ -154,7 +169,12 @@ public final Builder docs(MultiGetResponseItem value, } /** - * Required - API name: {@code docs} + * Required - The response includes a docs array that contains the documents in + * the order specified in the request. The structure of the returned documents + * is similar to that returned by the get API. If there is a failure getting a + * particular document, the error is included in place of the document. + *

+ * API name: {@code docs} *

* Adds a value to docs using a builder lambda. */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MsearchTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MsearchTemplateRequest.java index ca78618da..b13c99bd0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MsearchTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MsearchTemplateRequest.java @@ -66,7 +66,23 @@ /** * Run multiple templated searches. + *

+ * Run multiple templated searches with a single request. If you are providing a + * text file or text input to curl, use the + * --data-binary flag instead of -d to preserve + * newlines. For example: * + *

+ * $ cat requests
+ * { "index": "my-index" }
+ * { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
+ * { "index": "my-other-index" }
+ * { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
+ *
+ * $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
+ * 
+ * 
+ * * @see API * specification @@ -118,8 +134,8 @@ public final Boolean ccsMinimizeRoundtrips() { } /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (*). To search all data streams and indices, + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). To search all data streams and indices, * omit this parameter or use *. *

* API name: {@code index} @@ -129,7 +145,7 @@ public final List index() { } /** - * Maximum number of concurrent searches the API can run. + * The maximum number of concurrent searches the API can run. *

* API name: {@code max_concurrent_searches} */ @@ -139,8 +155,7 @@ public final Long maxConcurrentSearches() { } /** - * The type of the search operation. Available options: - * query_then_fetch, dfs_query_then_fetch. + * The type of the search operation. *

* API name: {@code search_type} */ @@ -204,8 +219,8 @@ public final Builder ccsMinimizeRoundtrips(@Nullable Boolean value) { } /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (*). To search all data streams and indices, + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). To search all data streams and indices, * omit this parameter or use *. *

* API name: {@code index} @@ -218,8 +233,8 @@ public final Builder index(List list) { } /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (*). To search all data streams and indices, + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). To search all data streams and indices, * omit this parameter or use *. *

* API name: {@code index} @@ -232,7 +247,7 @@ public final Builder index(String value, String... values) { } /** - * Maximum number of concurrent searches the API can run. + * The maximum number of concurrent searches the API can run. *

* API name: {@code max_concurrent_searches} */ @@ -242,8 +257,7 @@ public final Builder maxConcurrentSearches(@Nullable Long value) { } /** - * The type of the search operation. Available options: - * query_then_fetch, dfs_query_then_fetch. + * The type of the search operation. *

* API name: {@code search_type} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MtermvectorsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MtermvectorsRequest.java index 948d2bd0e..b21b9a5f5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MtermvectorsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/MtermvectorsRequest.java @@ -65,11 +65,17 @@ /** * Get multiple term vectors. *

- * You can specify existing documents by index and ID or provide artificial - * documents in the body of the request. You can specify the index in the - * request body or request URI. The response contains a docs array - * with all the fetched termvectors. Each element has the structure provided by - * the termvectors API. + * Get multiple term vectors with a single request. You can specify existing + * documents by index and ID or provide artificial documents in the body of the + * request. You can specify the index in the request body or request URI. The + * response contains a docs array with all the fetched termvectors. + * Each element has the structure provided by the termvectors API. + *

+ * Artificial documents + *

+ * You can also use mtermvectors to generate term vectors for + * artificial documents provided in the body of the request. The mapping used is + * determined by the specified _index. * * @see API * specification @@ -141,7 +147,7 @@ public static MtermvectorsRequest of(Function * API name: {@code docs} */ @@ -161,10 +167,10 @@ public final Boolean fieldStatistics() { } /** - * Comma-separated list or wildcard expressions of fields to include in the - * statistics. Used as the default list unless a specific field list is provided - * in the completion_fields or fielddata_fields - * parameters. + * A comma-separated list or wildcard expressions of fields to include in the + * statistics. It is used as the default list unless a specific field list is + * provided in the completion_fields or + * fielddata_fields parameters. *

* API name: {@code fields} */ @@ -173,7 +179,7 @@ public final List fields() { } /** - * Simplified syntax to specify documents by their ID if they're in the same + * A simplified syntax to specify documents by their ID if they're in the same * index. *

* API name: {@code ids} @@ -183,7 +189,7 @@ public final List ids() { } /** - * Name of the index that contains the documents. + * The name of the index that contains the documents. *

* API name: {@code index} */ @@ -223,7 +229,7 @@ public final Boolean positions() { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -244,7 +250,7 @@ public final Boolean realtime() { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -274,7 +280,7 @@ public final Long version() { } /** - * Specific version type. + * The version type. *

* API name: {@code version_type} */ @@ -369,7 +375,7 @@ public static class Builder extends RequestBase.AbstractBuilder private VersionType versionType; /** - * Array of existing or artificial documents. + * An array of existing or artificial documents. *

* API name: {@code docs} *

@@ -381,7 +387,7 @@ public final Builder docs(List list) { } /** - * Array of existing or artificial documents. + * An array of existing or artificial documents. *

* API name: {@code docs} *

@@ -393,7 +399,7 @@ public final Builder docs(MultiTermVectorsOperation value, MultiTermVectorsOpera } /** - * Array of existing or artificial documents. + * An array of existing or artificial documents. *

* API name: {@code docs} *

@@ -416,10 +422,10 @@ public final Builder fieldStatistics(@Nullable Boolean value) { } /** - * Comma-separated list or wildcard expressions of fields to include in the - * statistics. Used as the default list unless a specific field list is provided - * in the completion_fields or fielddata_fields - * parameters. + * A comma-separated list or wildcard expressions of fields to include in the + * statistics. It is used as the default list unless a specific field list is + * provided in the completion_fields or + * fielddata_fields parameters. *

* API name: {@code fields} *

@@ -431,10 +437,10 @@ public final Builder fields(List list) { } /** - * Comma-separated list or wildcard expressions of fields to include in the - * statistics. Used as the default list unless a specific field list is provided - * in the completion_fields or fielddata_fields - * parameters. + * A comma-separated list or wildcard expressions of fields to include in the + * statistics. It is used as the default list unless a specific field list is + * provided in the completion_fields or + * fielddata_fields parameters. *

* API name: {@code fields} *

@@ -446,7 +452,7 @@ public final Builder fields(String value, String... values) { } /** - * Simplified syntax to specify documents by their ID if they're in the same + * A simplified syntax to specify documents by their ID if they're in the same * index. *

* API name: {@code ids} @@ -459,7 +465,7 @@ public final Builder ids(List list) { } /** - * Simplified syntax to specify documents by their ID if they're in the same + * A simplified syntax to specify documents by their ID if they're in the same * index. *

* API name: {@code ids} @@ -472,7 +478,7 @@ public final Builder ids(String value, String... values) { } /** - * Name of the index that contains the documents. + * The name of the index that contains the documents. *

* API name: {@code index} */ @@ -512,7 +518,7 @@ public final Builder positions(@Nullable Boolean value) { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -533,7 +539,7 @@ public final Builder realtime(@Nullable Boolean value) { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -563,7 +569,7 @@ public final Builder version(@Nullable Long value) { } /** - * Specific version type. + * The version type. *

* API name: {@code version_type} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/OpenPointInTimeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/OpenPointInTimeRequest.java index 33f573830..62f2a1831 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/OpenPointInTimeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/OpenPointInTimeRequest.java @@ -75,8 +75,50 @@ * the more recent point in time. *

* A point in time must be opened explicitly before being used in search - * requests. The keep_alive parameter tells Elasticsearch how long - * it should persist. + * requests. + *

+ * A subsequent search request with the pit parameter must not + * specify index, routing, or preference + * values as these parameters are copied from the point in time. + *

+ * Just like regular searches, you can use from and + * size to page through point in time search results, up to the + * first 10,000 hits. If you want to retrieve more hits, use PIT with + * search_after. + *

+ * IMPORTANT: The open point in time request and each subsequent search request + * can return different identifiers; always use the most recently received ID + * for the next search request. + *

+ * When a PIT that contains shard failures is used in a search request, the + * missing shards are always reported in the search response as a + * NoShardAvailableActionException exception. To get rid of these + * exceptions, a new PIT needs to be created so that shards missing from the + * previous PIT can be handled, assuming they become available in the meantime. + *

+ * Keeping point in time alive + *

+ * The keep_alive parameter, which is passed to a open point in + * time request and search request, extends the time to live of the + * corresponding point in time. The value does not need to be long enough to + * process all data — it just needs to be long enough for the next request. + *

+ * Normally, the background merge process optimizes the index by merging + * together smaller segments to create new, bigger segments. Once the smaller + * segments are no longer needed they are deleted. However, open point-in-times + * prevent the old segments from being deleted since they are still in use. + *

+ * TIP: Keeping older segments alive means that more disk space and file handles + * are needed. Ensure that you have configured your nodes to have ample free + * file handles. + *

+ * Additionally, if a segment contains deleted or updated documents then the + * point in time must keep track of whether each document in the segment was + * live at the time of the initial search request. Ensure that your nodes have + * sufficient heap space if you have many open point-in-times on an index that + * is subject to ongoing deletes or updates. Note that a point-in-time doesn't + * prevent its associated indices from being deleted. You can check how many + * point-in-times (that is, search contexts) are open with the nodes stats API. * * @see API @@ -125,10 +167,11 @@ public static OpenPointInTimeRequest of(Functionfalse, creating a point in time request when a shard is - * missing or unavailable will throw an exception. If true, the - * point in time will contain all the shards that are available at the time of - * the request. + * Indicates whether the point in time tolerates unavailable shards or shard + * failures when initially creating the PIT. If false, creating a + * point in time request when a shard is missing or unavailable will throw an + * exception. If true, the point in time will contain all the + * shards that are available at the time of the request. *

* API name: {@code allow_partial_search_results} */ @@ -138,9 +181,9 @@ public final Boolean allowPartialSearchResults() { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as + * hidden data streams. It supports comma-separated values, such as * open,hidden. Valid values are: all, * open, closed, hidden, * none. @@ -173,8 +216,8 @@ public final List index() { } /** - * Allows to filter indices if the provided query rewrites to - * match_none on every shard. + * Filter indices if the provided query rewrites to match_none on + * every shard. *

* API name: {@code index_filter} */ @@ -184,7 +227,7 @@ public final Query indexFilter() { } /** - * Required - Extends the time to live of the corresponding point in time. + * Required - Extend the length of time that the point in time persists. *

* API name: {@code keep_alive} */ @@ -193,8 +236,8 @@ public final Time keepAlive() { } /** - * Specifies the node or shard the operation should be performed on. Random by - * default. + * The node or shard the operation should be performed on. By default, it is + * random. *

* API name: {@code preference} */ @@ -204,7 +247,7 @@ public final String preference() { } /** - * Custom value used to route operations to a specific shard. + * A custom value that is used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -264,10 +307,11 @@ public static class Builder extends RequestBase.AbstractBuilder private String routing; /** - * If false, creating a point in time request when a shard is - * missing or unavailable will throw an exception. If true, the - * point in time will contain all the shards that are available at the time of - * the request. + * Indicates whether the point in time tolerates unavailable shards or shard + * failures when initially creating the PIT. If false, creating a + * point in time request when a shard is missing or unavailable will throw an + * exception. If true, the point in time will contain all the + * shards that are available at the time of the request. *

* API name: {@code allow_partial_search_results} */ @@ -277,9 +321,9 @@ public final Builder allowPartialSearchResults(@Nullable Boolean value) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as + * hidden data streams. It supports comma-separated values, such as * open,hidden. Valid values are: all, * open, closed, hidden, * none. @@ -294,9 +338,9 @@ public final Builder expandWildcards(List list) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as + * hidden data streams. It supports comma-separated values, such as * open,hidden. Valid values are: all, * open, closed, hidden, * none. @@ -348,8 +392,8 @@ public final Builder index(String value, String... values) { } /** - * Allows to filter indices if the provided query rewrites to - * match_none on every shard. + * Filter indices if the provided query rewrites to match_none on + * every shard. *

* API name: {@code index_filter} */ @@ -359,8 +403,8 @@ public final Builder indexFilter(@Nullable Query value) { } /** - * Allows to filter indices if the provided query rewrites to - * match_none on every shard. + * Filter indices if the provided query rewrites to match_none on + * every shard. *

* API name: {@code index_filter} */ @@ -369,7 +413,7 @@ public final Builder indexFilter(Function> f } /** - * Required - Extends the time to live of the corresponding point in time. + * Required - Extend the length of time that the point in time persists. *

* API name: {@code keep_alive} */ @@ -379,7 +423,7 @@ public final Builder keepAlive(Time value) { } /** - * Required - Extends the time to live of the corresponding point in time. + * Required - Extend the length of time that the point in time persists. *

* API name: {@code keep_alive} */ @@ -388,8 +432,8 @@ public final Builder keepAlive(Function> fn) { } /** - * Specifies the node or shard the operation should be performed on. Random by - * default. + * The node or shard the operation should be performed on. By default, it is + * random. *

* API name: {@code preference} */ @@ -399,7 +443,7 @@ public final Builder preference(@Nullable String value) { } /** - * Custom value used to route operations to a specific shard. + * A custom value that is used to route operations to a specific shard. *

* API name: {@code routing} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/PutScriptRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/PutScriptRequest.java index aaa373dce..1faf73a36 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/PutScriptRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/PutScriptRequest.java @@ -97,8 +97,8 @@ public static PutScriptRequest of(Function * API name: {@code context} */ @@ -108,8 +108,8 @@ public final String context() { } /** - * Required - Identifier for the stored script or search template. Must be - * unique within the cluster. + * Required - The identifier for the stored script or search template. It must + * be unique within the cluster. *

* API name: {@code id} */ @@ -118,8 +118,10 @@ public final String id() { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * It can also be set to -1 to indicate that the request should + * never timeout. *

* API name: {@code master_timeout} */ @@ -129,8 +131,7 @@ public final Time masterTimeout() { } /** - * Required - Contains the script or search template, its parameters, and its - * language. + * Required - The script or search template, its parameters, and its language. *

* API name: {@code script} */ @@ -139,8 +140,9 @@ public final StoredScript script() { } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. It can also be set + * to -1 to indicate that the request should never timeout. *

* API name: {@code timeout} */ @@ -188,8 +190,8 @@ public static class Builder extends RequestBase.AbstractBuilder private Time timeout; /** - * Context in which the script or search template should run. To prevent errors, - * the API immediately compiles the script or template in this context. + * The context in which the script or search template should run. To prevent + * errors, the API immediately compiles the script or template in this context. *

* API name: {@code context} */ @@ -199,8 +201,8 @@ public final Builder context(@Nullable String value) { } /** - * Required - Identifier for the stored script or search template. Must be - * unique within the cluster. + * Required - The identifier for the stored script or search template. It must + * be unique within the cluster. *

* API name: {@code id} */ @@ -210,8 +212,10 @@ public final Builder id(String value) { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * It can also be set to -1 to indicate that the request should + * never timeout. *

* API name: {@code master_timeout} */ @@ -221,8 +225,10 @@ public final Builder masterTimeout(@Nullable Time value) { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * It can also be set to -1 to indicate that the request should + * never timeout. *

* API name: {@code master_timeout} */ @@ -231,8 +237,7 @@ public final Builder masterTimeout(Function> f } /** - * Required - Contains the script or search template, its parameters, and its - * language. + * Required - The script or search template, its parameters, and its language. *

* API name: {@code script} */ @@ -242,8 +247,7 @@ public final Builder script(StoredScript value) { } /** - * Required - Contains the script or search template, its parameters, and its - * language. + * Required - The script or search template, its parameters, and its language. *

* API name: {@code script} */ @@ -252,8 +256,9 @@ public final Builder script(Function-1 to indicate that the request should never timeout. *

* API name: {@code timeout} */ @@ -263,8 +268,9 @@ public final Builder timeout(@Nullable Time value) { } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. It can also be set + * to -1 to indicate that the request should never timeout. *

* API name: {@code timeout} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/RankEvalRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/RankEvalRequest.java index 4d9ab42ee..392cc3da1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/RankEvalRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/RankEvalRequest.java @@ -146,7 +146,7 @@ public final Boolean ignoreUnavailable() { } /** - * Comma-separated list of data streams, indices, and index aliases used to + * A comma-separated list of data streams, indices, and index aliases used to * limit the request. Wildcard (*) expressions are supported. To * target all data streams and indices in a cluster, omit this parameter or use * _all or *. @@ -296,7 +296,7 @@ public final Builder ignoreUnavailable(@Nullable Boolean value) { } /** - * Comma-separated list of data streams, indices, and index aliases used to + * A comma-separated list of data streams, indices, and index aliases used to * limit the request. Wildcard (*) expressions are supported. To * target all data streams and indices in a cluster, omit this parameter or use * _all or *. @@ -311,7 +311,7 @@ public final Builder index(List list) { } /** - * Comma-separated list of data streams, indices, and index aliases used to + * A comma-separated list of data streams, indices, and index aliases used to * limit the request. Wildcard (*) expressions are supported. To * target all data streams and indices in a cluster, omit this parameter or use * _all or *. 
diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexRequest.java index 134ec00d1..61bc78753 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexRequest.java @@ -67,10 +67,274 @@ // typedef: _global.reindex.Request /** - * Reindex documents. Copies documents from a source to a destination. The - * source can be any existing index, alias, or data stream. The destination must - * differ from the source. For example, you cannot reindex a data stream into - * itself. + * Reindex documents. + *

+ * Copy documents from a source to a destination. You can copy all documents to + * the destination index or reindex a subset of the documents. The source can be + * any existing index, alias, or data stream. The destination must differ from + * the source. For example, you cannot reindex a data stream into itself. + *

+ * IMPORTANT: Reindex requires _source to be enabled for all + * documents in the source. The destination should be configured as wanted + * before calling the reindex API. Reindex does not copy the settings from the + * source or its associated template. Mappings, shard counts, and replicas, for + * example, must be configured ahead of time. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following security privileges: + *

+ *

+ * If reindexing from a remote cluster, you must explicitly allow the remote + * host in the reindex.remote.whitelist setting. Automatic data + * stream creation requires a matching index template with data stream enabled. + *

+ * The dest element can be configured like the index API to control + * optimistic concurrency control. Omitting version_type or setting + * it to internal causes Elasticsearch to blindly dump documents + * into the destination, overwriting any that happen to have the same ID. + *

+ * Setting version_type to external causes + * Elasticsearch to preserve the version from the source, create + * any documents that are missing, and update any documents that have an older + * version in the destination than they do in the source. + *

+ * Setting op_type to create causes the reindex API to + * create only missing documents in the destination. All existing documents will + * cause a version conflict. + *

+ * IMPORTANT: Because data streams are append-only, any reindex request to a + * destination data stream must have an op_type of + * create. A reindex can only add new documents to a destination + * data stream. It cannot update existing documents in a destination data + * stream. + *

+ * By default, version conflicts abort the reindex process. To continue + * reindexing if there are conflicts, set the conflicts request + * body property to proceed. In this case, the response includes a + * count of the version conflicts that were encountered. Note that the handling + * of other error types is unaffected by the conflicts property. + * Additionally, if you opt to count version conflicts, the operation could + * attempt to reindex more documents from the source than max_docs + * until it has successfully indexed max_docs documents into the + * target or it has gone through every document in the source query. + *

+ * NOTE: The reindex API makes no effort to handle ID collisions. The last + * document written will "win" but the order isn't usually predictable + * so it is not a good idea to rely on this behavior. Instead, make sure that + * IDs are unique by using a script. + *

+ * Running reindex asynchronously + *

+ * If the request contains wait_for_completion=false, Elasticsearch + * performs some preflight checks, launches the request, and returns a task you + * can use to cancel or get the status of the task. Elasticsearch creates a + * record of this task as a document at _tasks/<task_id>. + *

+ * Reindex from multiple sources + *

+ * If you have many sources to reindex it is generally better to reindex them + * one at a time rather than using a glob pattern to pick up multiple sources. + * That way you can resume the process if there are any errors by removing the + * partially completed source and starting over. It also makes parallelizing the + * process fairly simple: split the list of sources to reindex and run each list + * in parallel. + *

+ * For example, you can use a bash script like this: + * + *

+ * for index in i1 i2 i3 i4 i5; do
+ *   curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
+ *     "source": {
+ *       "index": "'$index'"
+ *     },
+ *     "dest": {
+ *       "index": "'$index'-reindexed"
+ *     }
+ *   }'
+ * done
+ * 
+ * 
+ *

+ * Throttling + *

+ * Set requests_per_second to any positive decimal number + * (1.4, 6, 1000, for example) to + * throttle the rate at which reindex issues batches of index operations. + * Requests are throttled by padding each batch with a wait time. To turn off + * throttling, set requests_per_second to -1. + *

+ * The throttling is done by waiting between batches so that the scroll that + * reindex uses internally can be given a timeout that takes into account the + * padding. The padding time is the difference between the batch size divided by + * the requests_per_second and the time spent writing. By default + * the batch size is 1000, so if requests_per_second + * is set to 500: + * + *

+ * target_time = 1000 / 500 per second = 2 seconds
+ * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+ * 
+ * 
+ *

+ * Since the batch is issued as a single bulk request, large batch sizes cause + * Elasticsearch to create many requests and then wait for a while before + * starting the next set. This is "bursty" instead of + * "smooth". + *

+ * Slicing + *

+ * Reindex supports sliced scroll to parallelize the reindexing process. This + * parallelization can improve efficiency and provide a convenient way to break + * the request down into smaller parts. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * You can slice a reindex request manually by providing a slice ID and total + * number of slices to each request. You can also let reindex automatically + * parallelize by using sliced scroll to slice on _id. The + * slices parameter specifies the number of slices to use. + *

+ * Adding slices to the reindex request just automates the manual + * process, creating sub-requests which means it has some quirks: + *

    + *
  • You can see these requests in the tasks API. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices + * only contains the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each + * sub-request.
  • + *
  • Due to the nature of slices, each sub-request won't get a + * perfectly even portion of the documents. All documents will be addressed, but + * some slices may be larger than others. Expect larger slices to have a more + * even distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed + * proportionally to each sub-request. Combine that with the previous point + * about distribution being uneven and you should conclude that using + * max_docs with slices might not result in exactly + * max_docs documents being reindexed.
  • + *
  • Each sub-request gets a slightly different snapshot of the source, though + * these are all taken at approximately the same time.
  • + *
+ *

+ * If slicing automatically, setting slices to auto + * will choose a reasonable number for most indices. If slicing manually or + * otherwise tuning automatic slicing, use the following guidelines. + *

+ * Query performance is most efficient when the number of slices is equal to the + * number of shards in the index. If that number is large (for example, + * 500), choose a lower number as too many slices will hurt + * performance. Setting slices higher than the number of shards generally does + * not improve efficiency and adds overhead. + *

+ * Indexing performance scales linearly across available resources with the + * number of slices. + *

+ * Whether query or indexing performance dominates the runtime depends on the + * documents being reindexed and cluster resources. + *

+ * Modify documents during reindexing + *

+ * Like _update_by_query, reindex operations support a script that + * modifies the document. Unlike _update_by_query, the script is + * allowed to modify the document's metadata. + *

+ * Just as in _update_by_query, you can set ctx.op to + * change the operation that is run on the destination. For example, set + * ctx.op to noop if your script decides that the + * document doesn’t have to be indexed in the destination. This "no + * operation" will be reported in the noop counter in the + * response body. Set ctx.op to delete if your script + * decides that the document must be deleted from the destination. The deletion + * will be reported in the deleted counter in the response body. + * Setting ctx.op to anything else will return an error, as will + * setting any other field in ctx. + *

+ * Think of the possibilities! Just be careful; you are able to change: + *

    + *
  • _id
  • + *
  • _index
  • + *
  • _version
  • + *
  • _routing
  • + *
+ *

+ * Setting _version to null or clearing it from the + * ctx map is just like not sending the version in an indexing + * request. It will cause the document to be overwritten in the destination + * regardless of the version on the target or the version type you use in the + * reindex API. + *

+ * Reindex from remote + *

+ * Reindex supports reindexing from a remote Elasticsearch cluster. The + * host parameter must contain a scheme, host, port, and optional + * path. The username and password parameters are + * optional and when they are present the reindex operation will connect to the + * remote Elasticsearch node using basic authentication. Be sure to use HTTPS + * when using basic authentication or the password will be sent in plain text. + * There are a range of settings available to configure the behavior of the + * HTTPS connection. + *

+ * When using Elastic Cloud, it is also possible to authenticate against the + * remote cluster through the use of a valid API key. Remote hosts must be + * explicitly allowed with the reindex.remote.whitelist setting. It + * can be set to a comma delimited list of allowed remote host and port + * combinations. Scheme is ignored; only the host and port are used. For + * example: + * + *

+ * reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
+ * 
+ * 
+ *

+ * The list of allowed hosts must be configured on any nodes that will + * coordinate the reindex. This feature should work with remote clusters of any + * version of Elasticsearch. This should enable you to upgrade from any version + * of Elasticsearch to the current version by reindexing from a cluster of the + * old version. + *

+ * WARNING: Elasticsearch does not support forward compatibility across major + * versions. For example, you cannot reindex from a 7.x cluster into a 6.x + * cluster. + *

+ * To enable queries sent to older versions of Elasticsearch, the + * query parameter is sent directly to the remote host without + * validation or modification. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * Reindexing from a remote server uses an on-heap buffer that defaults to a + * maximum size of 100mb. If the remote index includes very large documents + * you'll need to use a smaller batch size. It is also possible to set the + * socket read timeout on the remote connection with the + * socket_timeout field and the connection timeout with the + * connect_timeout field. Both default to 30 seconds. + *

+ * Configuring SSL parameters + *

+ * Reindex from remote supports configurable SSL settings. These must be + * specified in the elasticsearch.yml file, with the exception of + * the secure settings, which you add in the Elasticsearch keystore. It is not + * possible to configure SSL in the body of the reindex request. * * @see API * specification @@ -143,7 +407,7 @@ public static ReindexRequest of(Function> } /** - * Set to proceed to continue reindexing even if there are conflicts. + * Indicates whether to continue reindexing even when there are conflicts. *

* API name: {@code conflicts} */ @@ -162,7 +426,15 @@ public final Destination dest() { } /** - * The maximum number of documents to reindex. + * The maximum number of documents to reindex. By default, all documents are + * reindexed. If it is a value less than or equal to scroll_size, a + * scroll will not be used to retrieve the results for the operation. + *

+ * If conflicts is set to proceed, the reindex + * operation could attempt to reindex more documents from the source than + * max_docs until it has successfully indexed max_docs + * documents into the target or it has gone through every document in the source + * query. *

* API name: {@code max_docs} */ @@ -183,8 +455,8 @@ public final Boolean refresh() { } /** - * The throttle for this request in sub-requests per second. Defaults to no - * throttle. + * The throttle for this request in sub-requests per second. By default, there + * is no throttle. *

* API name: {@code requests_per_second} */ @@ -214,8 +486,8 @@ public final Script script() { } /** - * Specifies how long a consistent view of the index should be maintained for - * scrolled search. + * The period of time that a consistent view of the index should be maintained + * for scrolled search. *

* API name: {@code scroll} */ @@ -233,8 +505,20 @@ public final Long size() { } /** - * The number of slices this task should be divided into. Defaults to 1 slice, - * meaning the task isn’t sliced into subtasks. + * The number of slices this task should be divided into. It defaults to one + * slice, which means the task isn't sliced into subtasks. + *

+ * Reindex supports sliced scroll to parallelize the reindexing process. This + * parallelization can improve efficiency and provide a convenient way to break + * the request down into smaller parts. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * If set to auto, Elasticsearch chooses the number of slices to + * use. This setting will use one slice per shard, up to a certain limit. If + * there are multiple sources, it will choose the number of slices based on the + * index or backing index with the smallest number of shards. *

* API name: {@code slices} */ @@ -253,8 +537,10 @@ public final Source source() { } /** - * Period each indexing waits for automatic index creation, dynamic mapping - * updates, and waiting for active shards. + * The period each indexing waits for automatic index creation, dynamic mapping + * updates, and waiting for active shards. By default, Elasticsearch waits for + * at least one minute before failing. The actual wait time could be longer, + * particularly when multiple waits occur. *

* API name: {@code timeout} */ @@ -265,8 +551,10 @@ public final Time timeout() { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total - * number of shards in the index (number_of_replicas+1). + * operation. Set it to all or any positive integer up to the total + * number of shards in the index (number_of_replicas+1). The + * default value is one, which means it waits for each primary shard to be + * active. *

* API name: {@code wait_for_active_shards} */ @@ -371,7 +659,7 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private Boolean waitForCompletion; /** - * Set to proceed to continue reindexing even if there are conflicts. + * Indicates whether to continue reindexing even when there are conflicts. *

* API name: {@code conflicts} */ @@ -400,7 +688,15 @@ public final Builder dest(Functionscroll_size, a + * scroll will not be used to retrieve the results for the operation. + *

+ * If conflicts is set to proceed, the reindex + * operation could attempt to reindex more documents from the source than + * max_docs until it has successfully indexed max_docs + * documents into the target or it has gone through every document in the source + * query. *

* API name: {@code max_docs} */ @@ -421,8 +717,8 @@ public final Builder refresh(@Nullable Boolean value) { } /** - * The throttle for this request in sub-requests per second. Defaults to no - * throttle. + * The throttle for this request in sub-requests per second. By default, there + * is no throttle. *

* API name: {@code requests_per_second} */ @@ -461,8 +757,8 @@ public final Builder script(Function> fn) } /** - * Specifies how long a consistent view of the index should be maintained for - * scrolled search. + * The period of time that a consistent view of the index should be maintained + * for scrolled search. *

* API name: {@code scroll} */ @@ -472,8 +768,8 @@ public final Builder scroll(@Nullable Time value) { } /** - * Specifies how long a consistent view of the index should be maintained for - * scrolled search. + * The period of time that a consistent view of the index should be maintained + * for scrolled search. *

* API name: {@code scroll} */ @@ -490,8 +786,20 @@ public final Builder size(@Nullable Long value) { } /** - * The number of slices this task should be divided into. Defaults to 1 slice, - * meaning the task isn’t sliced into subtasks. + * The number of slices this task should be divided into. It defaults to one + * slice, which means the task isn't sliced into subtasks. + *

+ * Reindex supports sliced scroll to parallelize the reindexing process. This + * parallelization can improve efficiency and provide a convenient way to break + * the request down into smaller parts. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * If set to auto, Elasticsearch chooses the number of slices to + * use. This setting will use one slice per shard, up to a certain limit. If + * there are multiple sources, it will choose the number of slices based on the + * index or backing index with the smallest number of shards. *

* API name: {@code slices} */ @@ -501,8 +809,20 @@ public final Builder slices(@Nullable Slices value) { } /** - * The number of slices this task should be divided into. Defaults to 1 slice, - * meaning the task isn’t sliced into subtasks. + * The number of slices this task should be divided into. It defaults to one + * slice, which means the task isn't sliced into subtasks. + *

+ * Reindex supports sliced scroll to parallelize the reindexing process. This + * parallelization can improve efficiency and provide a convenient way to break + * the request down into smaller parts. + *

+ * NOTE: Reindexing from remote clusters does not support manual or automatic + * slicing. + *

+ * If set to auto, Elasticsearch chooses the number of slices to + * use. This setting will use one slice per shard, up to a certain limit. If + * there are multiple sources, it will choose the number of slices based on the + * index or backing index with the smallest number of shards. *

* API name: {@code slices} */ @@ -530,8 +850,10 @@ public final Builder source(Function> fn) } /** - * Period each indexing waits for automatic index creation, dynamic mapping - * updates, and waiting for active shards. + * The period each indexing waits for automatic index creation, dynamic mapping + * updates, and waiting for active shards. By default, Elasticsearch waits for + * at least one minute before failing. The actual wait time could be longer, + * particularly when multiple waits occur. *

* API name: {@code timeout} */ @@ -541,8 +863,10 @@ public final Builder timeout(@Nullable Time value) { } /** - * Period each indexing waits for automatic index creation, dynamic mapping - * updates, and waiting for active shards. + * The period each indexing waits for automatic index creation, dynamic mapping + * updates, and waiting for active shards. By default, Elasticsearch waits for + * at least one minute before failing. The actual wait time could be longer, + * particularly when multiple waits occur. *

* API name: {@code timeout} */ @@ -552,8 +876,10 @@ public final Builder timeout(Function> fn) { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total - * number of shards in the index (number_of_replicas+1). + * operation. Set it to all or any positive integer up to the total + * number of shards in the index (number_of_replicas+1). The + * default value is one, which means it waits for each primary shard to be + * active. *

* API name: {@code wait_for_active_shards} */ @@ -564,8 +890,10 @@ public final Builder waitForActiveShards(@Nullable WaitForActiveShards value) { /** * The number of shard copies that must be active before proceeding with the - * operation. Set to all or any positive integer up to the total - * number of shards in the index (number_of_replicas+1). + * operation. Set it to all or any positive integer up to the total + * number of shards in the index (number_of_replicas+1). The + * default value is one, which means it waits for each primary shard to be + * active. *

* API name: {@code wait_for_active_shards} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexResponse.java index 153999c54..5276670c3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexResponse.java @@ -141,6 +141,8 @@ public static ReindexResponse of(Function * API name: {@code batches} */ @Nullable @@ -149,6 +151,8 @@ public final Long batches() { } /** + * The number of documents that were successfully created. + *

* API name: {@code created} */ @Nullable @@ -157,6 +161,8 @@ public final Long created() { } /** + * The number of documents that were successfully deleted. + *

* API name: {@code deleted} */ @Nullable @@ -165,6 +171,13 @@ public final Long deleted() { } /** + * If there were any unrecoverable errors during the process, it is an array of + * those failures. If this array is not empty, the request ended because of + * those failures. Reindex is implemented using batches and any failure causes + * the entire process to end but all failures in the current batch are collected + * into the array. You can use the conflicts option to prevent the + * reindex from ending on version conflicts. + *

* API name: {@code failures} */ public final List failures() { @@ -172,6 +185,9 @@ public final List failures() { } /** + * The number of documents that were ignored because the script used for the + * reindex returned a noop value for ctx.op. + *

* API name: {@code noops} */ @Nullable @@ -180,6 +196,8 @@ public final Long noops() { } /** + * The number of retries attempted by reindex. + *

* API name: {@code retries} */ @Nullable @@ -188,6 +206,8 @@ public final Retries retries() { } /** + * The number of requests per second effectively run during the reindex. + *

* API name: {@code requests_per_second} */ @Nullable @@ -212,6 +232,9 @@ public final String task() { } /** + * The number of milliseconds the request slept to conform to + * requests_per_second. + *

* API name: {@code throttled_millis} */ @Nullable @@ -220,6 +243,11 @@ public final Long throttledMillis() { } /** + * This field should always be equal to zero in a reindex response. It has + * meaning only when using the task API, where it indicates the next time (in + * milliseconds since epoch) that a throttled request will be run again in order + * to conform to requests_per_second. + *

* API name: {@code throttled_until_millis} */ @Nullable @@ -228,6 +256,9 @@ public final Long throttledUntilMillis() { } /** + * If any of the requests that ran during the reindex timed out, it is + * true. + *

* API name: {@code timed_out} */ @Nullable @@ -236,6 +267,8 @@ public final Boolean timedOut() { } /** + * The total milliseconds the entire operation took. + *

* API name: {@code took} */ @Nullable @@ -244,6 +277,8 @@ public final Long took() { } /** + * The number of documents that were successfully processed. + *

* API name: {@code total} */ @Nullable @@ -252,6 +287,9 @@ public final Long total() { } /** + * The number of documents that were successfully updated. That is to say, a + * document with the same ID already existed before the reindex updated it. + *

* API name: {@code updated} */ @Nullable @@ -260,6 +298,8 @@ public final Long updated() { } /** + * The number of version conflicts that occurred. + *

* API name: {@code version_conflicts} */ @Nullable @@ -427,6 +467,8 @@ public static class Builder extends WithJsonObjectBuilderBase implement private Long versionConflicts; /** + * The number of scroll responses that were pulled back by the reindex. + *

* API name: {@code batches} */ public final Builder batches(@Nullable Long value) { @@ -435,6 +477,8 @@ public final Builder batches(@Nullable Long value) { } /** + * The number of documents that were successfully created. + *

* API name: {@code created} */ public final Builder created(@Nullable Long value) { @@ -443,6 +487,8 @@ public final Builder created(@Nullable Long value) { } /** + * The number of documents that were successfully deleted. + *

* API name: {@code deleted} */ public final Builder deleted(@Nullable Long value) { @@ -451,6 +497,13 @@ public final Builder deleted(@Nullable Long value) { } /** + * If there were any unrecoverable errors during the process, it is an array of + * those failures. If this array is not empty, the request ended because of + * those failures. Reindex is implemented using batches and any failure causes + * the entire process to end but all failures in the current batch are collected + * into the array. You can use the conflicts option to prevent the + * reindex from ending on version conflicts. + *

* API name: {@code failures} *

* Adds all elements of list to failures. @@ -461,6 +514,13 @@ public final Builder failures(List list) { } /** + * If there were any unrecoverable errors during the process, it is an array of + * those failures. If this array is not empty, the request ended because of + * those failures. Reindex is implemented using batches and any failure causes + * the entire process to end but all failures in the current batch are collected + * into the array. You can use the conflicts option to prevent the + * reindex from ending on version conflicts. + *

* API name: {@code failures} *

* Adds one or more values to failures. @@ -471,6 +531,13 @@ public final Builder failures(BulkIndexByScrollFailure value, BulkIndexByScrollF } /** + * If there were any unrecoverable errors during the process, it is an array of + * those failures. If this array is not empty, the request ended because of + * those failures. Reindex is implemented using batches and any failure causes + * the entire process to end but all failures in the current batch are collected + * into the array. You can use the conflicts option to prevent the + * reindex from ending on version conflicts. + *

* API name: {@code failures} *

* Adds a value to failures using a builder lambda. @@ -481,6 +548,9 @@ public final Builder failures( } /** + * The number of documents that were ignored because the script used for the + * reindex returned a noop value for ctx.op. + *

* API name: {@code noops} */ public final Builder noops(@Nullable Long value) { @@ -489,6 +559,8 @@ public final Builder noops(@Nullable Long value) { } /** + * The number of retries attempted by reindex. + *

* API name: {@code retries} */ public final Builder retries(@Nullable Retries value) { @@ -497,6 +569,8 @@ public final Builder retries(@Nullable Retries value) { } /** + * The number of retries attempted by reindex. + *

* API name: {@code retries} */ public final Builder retries(Function> fn) { @@ -504,6 +578,8 @@ public final Builder retries(Function> f } /** + * The number of requests per second effectively run during the reindex. + *

* API name: {@code requests_per_second} */ public final Builder requestsPerSecond(@Nullable Float value) { @@ -528,6 +604,9 @@ public final Builder task(@Nullable String value) { } /** + * The number of milliseconds the request slept to conform to + * requests_per_second. + *

* API name: {@code throttled_millis} */ public final Builder throttledMillis(@Nullable Long value) { @@ -536,6 +615,11 @@ public final Builder throttledMillis(@Nullable Long value) { } /** + * This field should always be equal to zero in a reindex response. It has + * meaning only when using the task API, where it indicates the next time (in + * milliseconds since epoch) that a throttled request will be run again in order + * to conform to requests_per_second. + *

* API name: {@code throttled_until_millis} */ public final Builder throttledUntilMillis(@Nullable Long value) { @@ -544,6 +628,9 @@ public final Builder throttledUntilMillis(@Nullable Long value) { } /** + * If any of the requests that ran during the reindex timed out, it is + * true. + *

* API name: {@code timed_out} */ public final Builder timedOut(@Nullable Boolean value) { @@ -552,6 +639,8 @@ public final Builder timedOut(@Nullable Boolean value) { } /** + * The total milliseconds the entire operation took. + *

* API name: {@code took} */ public final Builder took(@Nullable Long value) { @@ -560,6 +649,8 @@ public final Builder took(@Nullable Long value) { } /** + * The number of documents that were successfully processed. + *

* API name: {@code total} */ public final Builder total(@Nullable Long value) { @@ -568,6 +659,9 @@ public final Builder total(@Nullable Long value) { } /** + * The number of documents that were successfully updated. That is to say, a + * document with the same ID already existed before the reindex updated it. + *

* API name: {@code updated} */ public final Builder updated(@Nullable Long value) { @@ -576,6 +670,8 @@ public final Builder updated(@Nullable Long value) { } /** + * The number of version conflicts that occurred. + *

* API name: {@code version_conflicts} */ public final Builder versionConflicts(@Nullable Long value) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexRethrottleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexRethrottleRequest.java index 24a60f706..b100e8d31 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexRethrottleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexRethrottleRequest.java @@ -59,6 +59,16 @@ * Throttle a reindex operation. *

* Change the number of requests per second for a particular reindex operation. + * For example: + * + *

+ * POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
+ * 
+ * 
+ *

+ * Rethrottling that speeds up the query takes effect immediately. Rethrottling + * that slows down the query will take effect after completing the current + * batch. This behavior prevents scroll timeouts. * * @see API @@ -85,7 +95,9 @@ public static ReindexRethrottleRequest of(Function-1 to turn off throttling or any decimal number like + * 1.7 or 12 to throttle to that level. *

* API name: {@code requests_per_second} */ @@ -95,7 +107,7 @@ public final Float requestsPerSecond() { } /** - * Required - Identifier for the task. + * Required - The task identifier, which can be found by using the tasks API. *

* API name: {@code task_id} */ @@ -118,7 +130,9 @@ public static class Builder extends RequestBase.AbstractBuilder private String taskId; /** - * The throttle for this request in sub-requests per second. + * The throttle for this request in sub-requests per second. It can be either + * -1 to turn off throttling or any decimal number like + * 1.7 or 12 to throttle to that level. *

* API name: {@code requests_per_second} */ @@ -128,7 +142,7 @@ public final Builder requestsPerSecond(@Nullable Float value) { } /** - * Required - Identifier for the task. + * Required - The task identifier, which can be found by using the tasks API. *

* API name: {@code task_id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/RenderSearchTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/RenderSearchTemplateRequest.java index 9622a469b..acbda5d0d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/RenderSearchTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/RenderSearchTemplateRequest.java @@ -35,7 +35,6 @@ import jakarta.json.stream.JsonGenerator; import java.lang.String; import java.util.Collections; -import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.function.Function; @@ -104,8 +103,11 @@ public final String file() { } /** - * ID of the search template to render. If no source is specified, - * this or the id request body parameter is required. + * The ID of the search template to render. If no source is + * specified, this or the <template-id> request path + * parameter is required. If you specify both this parameter and the + * <template-id> parameter, the API uses only + * <template-id>. *

* API name: {@code id} */ @@ -125,8 +127,8 @@ public final Map params() { } /** - * An inline search template. Supports the same parameters as the search API's - * request body. These parameters also support Mustache variables. If no + * An inline search template. It supports the same parameters as the search + * API's request body. These parameters also support Mustache variables. If no * id or <templated-id> is specified, this * parameter is required. *

@@ -152,6 +154,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("file"); generator.write(this.file); + } + if (this.id != null) { + generator.writeKey("id"); + generator.write(this.id); + } if (ApiTypeHelper.isDefined(this.params)) { generator.writeKey("params"); @@ -202,8 +209,11 @@ public final Builder file(@Nullable String value) { } /** - * ID of the search template to render. If no source is specified, - * this or the id request body parameter is required. + * The ID of the search template to render. If no source is + * specified, this or the <template-id> request path + * parameter is required. If you specify both this parameter and the + * <template-id> parameter, the API uses only + * <template-id>. *

* API name: {@code id} */ @@ -239,8 +249,8 @@ public final Builder params(String key, JsonData value) { } /** - * An inline search template. Supports the same parameters as the search API's - * request body. These parameters also support Mustache variables. If no + * An inline search template. It supports the same parameters as the search + * API's request body. These parameters also support Mustache variables. If no * id or <templated-id> is specified, this * parameter is required. *

@@ -281,6 +291,7 @@ protected static void setupRenderSearchTemplateRequestDeserializer( ObjectDeserializer op) { op.add(Builder::file, JsonpDeserializer.stringDeserializer(), "file"); + op.add(Builder::id, JsonpDeserializer.stringDeserializer(), "id"); op.add(Builder::params, JsonpDeserializer.stringMapDeserializer(JsonData._DESERIALIZER), "params"); op.add(Builder::source, JsonpDeserializer.stringDeserializer(), "source"); @@ -302,47 +313,13 @@ protected static void setupRenderSearchTemplateRequestDeserializer( // Request path request -> { - final int _id = 1 << 0; - - int propsSet = 0; - - if (request.id() != null) - propsSet |= _id; - - if (propsSet == 0) { - StringBuilder buf = new StringBuilder(); - buf.append("/_render"); - buf.append("/template"); - return buf.toString(); - } - if (propsSet == (_id)) { - StringBuilder buf = new StringBuilder(); - buf.append("/_render"); - buf.append("/template"); - buf.append("/"); - SimpleEndpoint.pathEncode(request.id, buf); - return buf.toString(); - } - throw SimpleEndpoint.noPathTemplateFound("path"); + return "/_render/template"; }, // Path parameters request -> { - Map params = new HashMap<>(); - final int _id = 1 << 0; - - int propsSet = 0; - - if (request.id() != null) - propsSet |= _id; - - if (propsSet == 0) { - } - if (propsSet == (_id)) { - params.put("id", request.id); - } - return params; + return Collections.emptyMap(); }, // Request parameters diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ScriptsPainlessExecuteRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ScriptsPainlessExecuteRequest.java index 90f19710d..469083f2d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ScriptsPainlessExecuteRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ScriptsPainlessExecuteRequest.java @@ -22,6 +22,7 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import 
co.elastic.clients.elasticsearch._types.RequestBase; import co.elastic.clients.elasticsearch._types.Script; +import co.elastic.clients.elasticsearch.core.scripts_painless_execute.PainlessContext; import co.elastic.clients.elasticsearch.core.scripts_painless_execute.PainlessContextSetup; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; @@ -33,7 +34,6 @@ import co.elastic.clients.transport.endpoints.SimpleEndpoint; import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; -import java.lang.String; import java.util.Collections; import java.util.Objects; import java.util.function.Function; @@ -57,7 +57,18 @@ // typedef: _global.scripts_painless_execute.Request /** - * Run a script. Runs a script and returns a result. + * Run a script. + *

+ * Runs a script and returns a result. Use this API to build and test scripts, + * such as when defining a script for a runtime field. This API requires very + * few dependencies and is especially useful if you don't have permissions to + * write documents on a cluster. + *

+ * The API uses several contexts, which control how scripts are run, + * what variables are available at runtime, and what the return type is. + *

+ * Each context requires a script, but additional parameters depend on the + * context you're using for that script. * * @see API @@ -66,7 +77,7 @@ @JsonpDeserializable public class ScriptsPainlessExecuteRequest extends RequestBase implements JsonpSerializable { @Nullable - private final String context; + private final PainlessContext context; @Nullable private final PainlessContextSetup contextSetup; @@ -89,17 +100,20 @@ public static ScriptsPainlessExecuteRequest of(Function * API name: {@code context} */ @Nullable - public final String context() { + public final PainlessContext context() { return this.context; } /** - * Additional parameters for the context. + * Additional parameters for the context. NOTE: This parameter is + * required for all contexts except painless_test, which is the + * default if no value is provided for context. *

* API name: {@code context_setup} */ @@ -109,7 +123,7 @@ public final PainlessContextSetup contextSetup() { } /** - * The Painless script to execute. + * The Painless script to run. *

* API name: {@code script} */ @@ -131,8 +145,7 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { if (this.context != null) { generator.writeKey("context"); - generator.write(this.context); - + this.context.serialize(generator, mapper); } if (this.contextSetup != null) { generator.writeKey("context_setup"); @@ -157,7 +170,7 @@ public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { @Nullable - private String context; + private PainlessContext context; @Nullable private PainlessContextSetup contextSetup; @@ -166,17 +179,20 @@ public static class Builder extends RequestBase.AbstractBuilder private Script script; /** - * The context that the script should run in. + * The context that the script should run in. NOTE: Result ordering in the field + * contexts is not guaranteed. *

* API name: {@code context} */ - public final Builder context(@Nullable String value) { + public final Builder context(@Nullable PainlessContext value) { this.context = value; return this; } /** - * Additional parameters for the context. + * Additional parameters for the context. NOTE: This parameter is + * required for all contexts except painless_test, which is the + * default if no value is provided for context. *

* API name: {@code context_setup} */ @@ -186,7 +202,9 @@ public final Builder contextSetup(@Nullable PainlessContextSetup value) { } /** - * Additional parameters for the context. + * Additional parameters for the context. NOTE: This parameter is + * required for all contexts except painless_test, which is the + * default if no value is provided for context. *

* API name: {@code context_setup} */ @@ -196,7 +214,7 @@ public final Builder contextSetup( } /** - * The Painless script to execute. + * The Painless script to run. *

* API name: {@code script} */ @@ -206,7 +224,7 @@ public final Builder script(@Nullable Script value) { } /** - * The Painless script to execute. + * The Painless script to run. *

* API name: {@code script} */ @@ -243,7 +261,7 @@ public ScriptsPainlessExecuteRequest build() { protected static void setupScriptsPainlessExecuteRequestDeserializer( ObjectDeserializer op) { - op.add(Builder::context, JsonpDeserializer.stringDeserializer(), "context"); + op.add(Builder::context, PainlessContext._DESERIALIZER, "context"); op.add(Builder::contextSetup, PainlessContextSetup._DESERIALIZER, "context_setup"); op.add(Builder::script, Script._DESERIALIZER, "script"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ScrollRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ScrollRequest.java index 440f2039b..e21fe1087 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ScrollRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ScrollRequest.java @@ -105,7 +105,7 @@ public static ScrollRequest of(Function> f } /** - * Period to retain the search context for scrolling. + * The period to retain the search context for scrolling. *

* API name: {@code scroll} */ @@ -115,7 +115,7 @@ public final Time scroll() { } /** - * Required - Scroll ID of the search. + * Required - The scroll ID of the search. *

* API name: {@code scroll_id} */ @@ -157,7 +157,7 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private String scrollId; /** - * Period to retain the search context for scrolling. + * The period to retain the search context for scrolling. *

* API name: {@code scroll} */ @@ -167,7 +167,7 @@ public final Builder scroll(@Nullable Time value) { } /** - * Period to retain the search context for scrolling. + * The period to retain the search context for scrolling. *

* API name: {@code scroll} */ @@ -176,7 +176,7 @@ public final Builder scroll(Function> fn) { } /** - * Required - Scroll ID of the search. + * Required - The scroll ID of the search. *

* API name: {@code scroll_id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchMvtRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchMvtRequest.java index 45afb9ef6..854add08f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchMvtRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchMvtRequest.java @@ -73,7 +73,361 @@ /** * Search a vector tile. *

- * Search a vector tile for geospatial values. + * Search a vector tile for geospatial values. Before using this API, you should + * be familiar with the Mapbox vector tile specification. The API returns + * results as a binary mapbox vector tile. + *

+ * Internally, Elasticsearch translates a vector tile search API request into a + * search containing: + *

+ *

+ * For example, Elasticsearch may translate a vector tile search API request + * with a grid_agg argument of geotile and an + * exact_bounds argument of true into the following + * search + * + *

+ * GET my-index/_search
+ * {
+ *   "size": 10000,
+ *   "query": {
+ *     "geo_bounding_box": {
+ *       "my-geo-field": {
+ *         "top_left": {
+ *           "lat": -40.979898069620134,
+ *           "lon": -45
+ *         },
+ *         "bottom_right": {
+ *           "lat": -66.51326044311186,
+ *           "lon": 0
+ *         }
+ *       }
+ *     }
+ *   },
+ *   "aggregations": {
+ *     "grid": {
+ *       "geotile_grid": {
+ *         "field": "my-geo-field",
+ *         "precision": 11,
+ *         "size": 65536,
+ *         "bounds": {
+ *           "top_left": {
+ *             "lat": -40.979898069620134,
+ *             "lon": -45
+ *           },
+ *           "bottom_right": {
+ *             "lat": -66.51326044311186,
+ *             "lon": 0
+ *           }
+ *         }
+ *       }
+ *     },
+ *     "bounds": {
+ *       "geo_bounds": {
+ *         "field": "my-geo-field",
+ *         "wrap_longitude": false
+ *       }
+ *     }
+ *   }
+ * }
+ * 
+ * 
+ *

+ * The API returns results as a binary Mapbox vector tile. Mapbox vector tiles + * are encoded as Google Protobufs (PBF). By default, the tile contains three + * layers: + *

    + *
  • A hits layer containing a feature for each + * <field> value matching the geo_bounding_box + * query.
  • + *
  • An aggs layer containing a feature for each cell of the + * geotile_grid or geohex_grid. The layer only + * contains features for cells with matching data.
  • + *
  • A meta layer containing: + *
      + *
    • A feature containing a bounding box. By default, this is the bounding box + * of the tile.
    • + *
    • Value ranges for any sub-aggregations on the geotile_grid or + * geohex_grid.
    • + *
    • Metadata for the search.
    • + *
    + *
  • + *
+ *

+ * The API only returns features that can display at its zoom level. For + * example, if a polygon feature has no area at its zoom level, the API omits + * it. The API returns errors as UTF-8 encoded JSON. + *

+ * IMPORTANT: You can specify several options for this API as either a query + * parameter or request body parameter. If you specify both parameters, the + * query parameter takes precedence. + *

+ * Grid precision for geotile + *

+ * For a grid_agg of geotile, you can use cells in the + * aggs layer as tiles for lower zoom levels. + * grid_precision represents the additional zoom levels available + * through these cells. The final precision is computed by as follows: + * <zoom> + grid_precision. For example, if + * <zoom> is 7 and grid_precision is 8, then the + * geotile_grid aggregation will use a precision of 15. The maximum + * final precision is 29. The grid_precision also determines the + * number of cells for the grid as follows: + * (2^grid_precision) x (2^grid_precision). For example, a value of + * 8 divides the tile into a grid of 256 x 256 cells. The aggs + * layer only contains features for cells with matching data. + *

+ * Grid precision for geohex + *

+ * For a grid_agg of geohex, Elasticsearch uses + * <zoom> and grid_precision to calculate a + * final precision as follows: <zoom> + grid_precision. + *

+ * This precision determines the H3 resolution of the hexagonal cells produced + * by the geohex aggregation. The following table maps the H3 + * resolution for each precision. For example, if <zoom> is 3 + * and grid_precision is 3, the precision is 6. At a precision of + * 6, hexagonal cells have an H3 resolution of 2. If <zoom> + * is 3 and grid_precision is 4, the precision is 7. At a precision + * of 7, hexagonal cells have an H3 resolution of 3. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
PrecisionUnique tile binsH3 resolutionUnique hex binsRatio
14012230.5
21601227.625
364184213.15625
425618423.2890625
51024258825.744140625
64096258821.436035156
7163843411622.512329102
8655363411620.6280822754
926214442881221.099098206
10104857642881220.2747745514
114194304520168420.4808526039
12167772166141178820.8414913416
13671088646141178820.2103728354
142684354567988251620.3681524172
15107374182486917761220.644266719
16429496729686917761220.1610666797
1717179869184948424328420.2818666889
186871947673610338970298820.4932667053
19274877906944112372792091620.8632167343
201099511627776112372792091620.2158041836
2143980465111041216609544641220.3776573213
221759218604441613116266812488420.6609003122
237036874417766413116266812488420.165225078
2428147497671065614813867687418820.2891438866
251125899906842620155697073811931620.5060018015
264503599627370500155697073811931620.1265004504
2718014398509482000155697073811931620.03162511259
2872057594037927900155697073811931620.007906278149
29288230376151712000155697073811931620.001976569537
+ *

+ * Hexagonal cells don't align perfectly on a vector tile. Some cells may + * intersect more than one vector tile. To compute the H3 resolution for each + * precision, Elasticsearch compares the average density of hexagonal bins at + * each resolution with the average density of tile bins at each zoom level. + * Elasticsearch uses the H3 resolution that is closest to the corresponding + * geotile density. * * @see API * specification @@ -161,15 +515,25 @@ public static SearchMvtRequest of(Function - * Supports the following aggregation types: + * It supports the following aggregation types: *

    - *
  • avg
  • - *
  • cardinality
  • - *
  • max
  • - *
  • min
  • - *
  • sum
  • + *
  • avg
  • + *
  • boxplot
  • + *
  • cardinality
  • + *
  • extended stats
  • + *
  • max
  • + *
  • median absolute deviation
  • + *
  • min
  • + *
  • percentile
  • + *
  • percentile-rank
  • + *
  • stats
  • + *
  • sum
  • + *
  • value count
  • *
*

+ * The aggregation names can't start with _mvt_. The + * _mvt_ prefix is reserved for internal aggregations. + *

* API name: {@code aggs} */ public final Map aggs() { @@ -177,9 +541,9 @@ public final Map aggs() { } /** - * Size, in pixels, of a clipping buffer outside the tile. This allows renderers - * to avoid outline artifacts from geometries that extend past the extent of the - * tile. + * The size, in pixels, of a clipping buffer outside the tile. This allows + * renderers to avoid outline artifacts from geometries that extend past the + * extent of the tile. *

* API name: {@code buffer} */ @@ -189,11 +553,13 @@ public final Integer buffer() { } /** - * If false, the meta layer’s feature is the bounding box of the tile. If true, - * the meta layer’s feature is a bounding box resulting from a geo_bounds - * aggregation. The aggregation runs on <field> values that intersect the - * <zoom>/<x>/<y> tile with wrap_longitude set to false. The - * resulting bounding box may be larger than the vector tile. + * If false, the meta layer's feature is the bounding box of the + * tile. If true, the meta layer's feature is a bounding box + * resulting from a geo_bounds aggregation. The aggregation runs on + * <field> values that intersect the + * <zoom>/<x>/<y> tile with + * wrap_longitude set to false. The resulting bounding + * box may be larger than the vector tile. *

* API name: {@code exact_bounds} */ @@ -203,8 +569,8 @@ public final Boolean exactBounds() { } /** - * Size, in pixels, of a side of the tile. Vector tiles are square with equal - * sides. + * The size, in pixels, of a side of the tile. Vector tiles are square with + * equal sides. *

* API name: {@code extent} */ @@ -223,7 +589,7 @@ public final String field() { } /** - * Fields to return in the hits layer. Supports wildcards + * The fields to return in the hits layer. It supports wildcards * (*). This parameter does not support fields with array values. * Fields with array values may return inconsistent results. *

@@ -234,7 +600,7 @@ public final List fields() { } /** - * Aggregation used to create a grid for the field. + * The aggregation used to create a grid for the field. *

* API name: {@code grid_agg} */ @@ -245,8 +611,9 @@ public final GridAggregationType gridAgg() { /** * Additional zoom levels available through the aggs layer. For example, if - * <zoom> is 7 and grid_precision is 8, you can zoom in up to level 15. - * Accepts 0-8. If 0, results don’t include the aggs layer. + * <zoom> is 7 and grid_precision + * is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results + * don't include the aggs layer. *

* API name: {@code grid_precision} */ @@ -257,9 +624,9 @@ public final Integer gridPrecision() { /** * Determines the geometry type for features in the aggs layer. In the aggs - * layer, each feature represents a geotile_grid cell. If 'grid' each feature is - * a Polygon of the cells bounding box. If 'point' each feature is a Point that - * is the centroid of the cell. + * layer, each feature represents a geotile_grid cell. If + * grid, each feature is a polygon of the cells bounding box. If point`, + * each feature is a Point that is the centroid of the cell. *

* API name: {@code grid_type} */ @@ -279,7 +646,7 @@ public final List index() { } /** - * Query DSL used to filter documents for the search. + * The query DSL used to filter documents for the search. *

* API name: {@code query} */ @@ -299,8 +666,8 @@ public final Map runtimeMappings() { } /** - * Maximum number of features to return in the hits layer. Accepts 0-10000. If - * 0, results don’t include the hits layer. + * The maximum number of features to return in the hits layer. Accepts 0-10000. + * If 0, results don't include the hits layer. *

* API name: {@code size} */ @@ -310,9 +677,9 @@ public final Integer size() { } /** - * Sorts features in the hits layer. By default, the API calculates a bounding - * box for each feature. It sorts features based on this box’s diagonal length, - * from longest to shortest. + * Sort the features in the hits layer. By default, the API calculates a + * bounding box for each feature. It sorts features based on this box's diagonal + * length, from longest to shortest. *

* API name: {@code sort} */ @@ -321,10 +688,10 @@ public final List sort() { } /** - * Number of hits matching the query to count accurately. If true, - * the exact number of hits is returned at the cost of some performance. If - * false, the response does not include the total number of hits - * matching the query. + * The number of hits matching the query to count accurately. If + * true, the exact number of hits is returned at the cost of some + * performance. If false, the response does not include the total + * number of hits matching the query. *

* API name: {@code track_total_hits} */ @@ -336,6 +703,21 @@ public final TrackHits trackTotalHits() { /** * If true, the hits and aggs layers will contain additional point * features representing suggested label positions for the original features. + *

    + *
  • Point and MultiPoint features will have one of + * the points selected.
  • + *
  • Polygon and MultiPolygon features will have a + * single point generated, either the centroid, if it is within the polygon, or + * another point within the polygon selected from the sorted triangle-tree.
  • + *
  • LineString features will likewise provide a roughly central + * point selected from the triangle-tree.
  • + *
  • The aggregation results will provide one central point for each + * aggregation bucket.
  • + *
+ *

+ * All attributes from the original features will also be copied to the new + * label features. In addition, the new features will be distinguishable using + * the tag _mvt_label_position. *

* API name: {@code with_labels} */ @@ -539,15 +921,25 @@ public static class Builder extends RequestBase.AbstractBuilder /** * Sub-aggregations for the geotile_grid. *

- * Supports the following aggregation types: + * It supports the following aggregation types: *

    - *
  • avg
  • - *
  • cardinality
  • - *
  • max
  • - *
  • min
  • - *
  • sum
  • + *
  • avg
  • + *
  • boxplot
  • + *
  • cardinality
  • + *
  • extended stats
  • + *
  • max
  • + *
  • median absolute deviation
  • + *
  • min
  • + *
  • percentile
  • + *
  • percentile-rank
  • + *
  • stats
  • + *
  • sum
  • + *
  • value count
  • *
*

+ * The aggregation names can't start with _mvt_. The + * _mvt_ prefix is reserved for internal aggregations. + *

* API name: {@code aggs} *

* Adds all entries of map to aggs. @@ -560,15 +952,25 @@ public final Builder aggs(Map map) { /** * Sub-aggregations for the geotile_grid. *

- * Supports the following aggregation types: + * It supports the following aggregation types: *

    - *
  • avg
  • - *
  • cardinality
  • - *
  • max
  • - *
  • min
  • - *
  • sum
  • + *
  • avg
  • + *
  • boxplot
  • + *
  • cardinality
  • + *
  • extended stats
  • + *
  • max
  • + *
  • median absolute deviation
  • + *
  • min
  • + *
  • percentile
  • + *
  • percentile-rank
  • + *
  • stats
  • + *
  • sum
  • + *
  • value count
  • *
*

+ * The aggregation names can't start with _mvt_. The + * _mvt_ prefix is reserved for internal aggregations. + *

* API name: {@code aggs} *

* Adds an entry to aggs. @@ -581,15 +983,25 @@ public final Builder aggs(String key, Aggregation value) { /** * Sub-aggregations for the geotile_grid. *

- * Supports the following aggregation types: + * It supports the following aggregation types: *

    - *
  • avg
  • - *
  • cardinality
  • - *
  • max
  • - *
  • min
  • - *
  • sum
  • + *
  • avg
  • + *
  • boxplot
  • + *
  • cardinality
  • + *
  • extended stats
  • + *
  • max
  • + *
  • median absolute deviation
  • + *
  • min
  • + *
  • percentile
  • + *
  • percentile-rank
  • + *
  • stats
  • + *
  • sum
  • + *
  • value count
  • *
*

+ * The aggregation names can't start with _mvt_. The + * _mvt_ prefix is reserved for internal aggregations. + *

* API name: {@code aggs} *

* Adds an entry to aggs using a builder lambda. @@ -599,9 +1011,9 @@ public final Builder aggs(String key, Function * API name: {@code buffer} */ @@ -611,11 +1023,13 @@ public final Builder buffer(@Nullable Integer value) { } /** - * If false, the meta layer’s feature is the bounding box of the tile. If true, - * the meta layer’s feature is a bounding box resulting from a geo_bounds - * aggregation. The aggregation runs on <field> values that intersect the - * <zoom>/<x>/<y> tile with wrap_longitude set to false. The - * resulting bounding box may be larger than the vector tile. + * If false, the meta layer's feature is the bounding box of the + * tile. If true, the meta layer's feature is a bounding box + * resulting from a geo_bounds aggregation. The aggregation runs on + * <field> values that intersect the + * <zoom>/<x>/<y> tile with + * wrap_longitude set to false. The resulting bounding + * box may be larger than the vector tile. *

* API name: {@code exact_bounds} */ @@ -625,8 +1039,8 @@ public final Builder exactBounds(@Nullable Boolean value) { } /** - * Size, in pixels, of a side of the tile. Vector tiles are square with equal - * sides. + * The size, in pixels, of a side of the tile. Vector tiles are square with + * equal sides. *

* API name: {@code extent} */ @@ -646,7 +1060,7 @@ public final Builder field(String value) { } /** - * Fields to return in the hits layer. Supports wildcards + * The fields to return in the hits layer. It supports wildcards * (*). This parameter does not support fields with array values. * Fields with array values may return inconsistent results. *

@@ -660,7 +1074,7 @@ public final Builder fields(List list) { } /** - * Fields to return in the hits layer. Supports wildcards + * The fields to return in the hits layer. It supports wildcards * (*). This parameter does not support fields with array values. * Fields with array values may return inconsistent results. *

@@ -674,7 +1088,7 @@ public final Builder fields(String value, String... values) { } /** - * Aggregation used to create a grid for the field. + * The aggregation used to create a grid for the field. *

* API name: {@code grid_agg} */ @@ -685,8 +1099,9 @@ public final Builder gridAgg(@Nullable GridAggregationType value) { /** * Additional zoom levels available through the aggs layer. For example, if - * <zoom> is 7 and grid_precision is 8, you can zoom in up to level 15. - * Accepts 0-8. If 0, results don’t include the aggs layer. + * <zoom> is 7 and grid_precision + * is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results + * don't include the aggs layer. *

* API name: {@code grid_precision} */ @@ -697,9 +1112,9 @@ public final Builder gridPrecision(@Nullable Integer value) { /** * Determines the geometry type for features in the aggs layer. In the aggs - * layer, each feature represents a geotile_grid cell. If 'grid' each feature is - * a Polygon of the cells bounding box. If 'point' each feature is a Point that - * is the centroid of the cell. + * layer, each feature represents a geotile_grid cell. If + * grid, each feature is a polygon of the cells bounding box. If point`, + * each feature is a Point that is the centroid of the cell. *

* API name: {@code grid_type} */ @@ -735,7 +1150,7 @@ public final Builder index(String value, String... values) { } /** - * Query DSL used to filter documents for the search. + * The query DSL used to filter documents for the search. *

* API name: {@code query} */ @@ -745,7 +1160,7 @@ public final Builder query(@Nullable Query value) { } /** - * Query DSL used to filter documents for the search. + * The query DSL used to filter documents for the search. *

* API name: {@code query} */ @@ -793,8 +1208,8 @@ public final Builder runtimeMappings(String key, } /** - * Maximum number of features to return in the hits layer. Accepts 0-10000. If - * 0, results don’t include the hits layer. + * The maximum number of features to return in the hits layer. Accepts 0-10000. + * If 0, results don't include the hits layer. *

* API name: {@code size} */ @@ -804,9 +1219,9 @@ public final Builder size(@Nullable Integer value) { } /** - * Sorts features in the hits layer. By default, the API calculates a bounding - * box for each feature. It sorts features based on this box’s diagonal length, - * from longest to shortest. + * Sort the features in the hits layer. By default, the API calculates a + * bounding box for each feature. It sorts features based on this box's diagonal + * length, from longest to shortest. *

* API name: {@code sort} *

@@ -818,9 +1233,9 @@ public final Builder sort(List list) { } /** - * Sorts features in the hits layer. By default, the API calculates a bounding - * box for each feature. It sorts features based on this box’s diagonal length, - * from longest to shortest. + * Sort the features in the hits layer. By default, the API calculates a + * bounding box for each feature. It sorts features based on this box's diagonal + * length, from longest to shortest. *

* API name: {@code sort} *

@@ -832,9 +1247,9 @@ public final Builder sort(SortOptions value, SortOptions... values) { } /** - * Sorts features in the hits layer. By default, the API calculates a bounding - * box for each feature. It sorts features based on this box’s diagonal length, - * from longest to shortest. + * Sort the features in the hits layer. By default, the API calculates a + * bounding box for each feature. It sorts features based on this box's diagonal + * length, from longest to shortest. *

* API name: {@code sort} *

@@ -845,10 +1260,10 @@ public final Builder sort(Functiontrue, - * the exact number of hits is returned at the cost of some performance. If - * false, the response does not include the total number of hits - * matching the query. + * The number of hits matching the query to count accurately. If + * true, the exact number of hits is returned at the cost of some + * performance. If false, the response does not include the total + * number of hits matching the query. *

* API name: {@code track_total_hits} */ @@ -858,10 +1273,10 @@ public final Builder trackTotalHits(@Nullable TrackHits value) { } /** - * Number of hits matching the query to count accurately. If true, - * the exact number of hits is returned at the cost of some performance. If - * false, the response does not include the total number of hits - * matching the query. + * The number of hits matching the query to count accurately. If + * true, the exact number of hits is returned at the cost of some + * performance. If false, the response does not include the total + * number of hits matching the query. *

* API name: {@code track_total_hits} */ @@ -872,6 +1287,21 @@ public final Builder trackTotalHits(Functiontrue, the hits and aggs layers will contain additional point * features representing suggested label positions for the original features. + *

    + *
  • Point and MultiPoint features will have one of + * the points selected.
  • + *
  • Polygon and MultiPolygon features will have a + * single point generated, either the centroid, if it is within the polygon, or + * another point within the polygon selected from the sorted triangle-tree.
  • + *
  • LineString features will likewise provide a roughly central + * point selected from the triangle-tree.
  • + *
  • The aggregation results will provide one central point for each + * aggregation bucket.
  • + *
+ *

+ * All attributes from the original features will also be copied to the new + * label features. In addition, the new features will be distinguishable using + * the tag _mvt_label_position. *

* API name: {@code with_labels} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchRequest.java index 1d39e93b8..78c8b80cb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchRequest.java @@ -92,6 +92,29 @@ * Get search hits that match the query defined in the request. You can provide * search queries using the q query string parameter or the request * body. If both are specified, only the query parameter is used. + *

+ * If the Elasticsearch security features are enabled, you must have the read + * index privilege for the target data stream, index, or alias. For + * cross-cluster search, refer to the documentation about configuring CCS + * privileges. To search a point in time (PIT) for an alias, you must have the + * read index privilege for the alias's data streams or indices. + *

+ * Search slicing + *

+ * When paging through a large number of documents, it can be helpful to split + * the search into multiple slices to consume them independently with the + * slice and pit properties. By default the splitting + * is done first on the shards, then locally on each shard. The local splitting + * partitions the shard into contiguous ranges based on Lucene document IDs. + *

+ * For instance if the number of shards is equal to 2 and you request 4 slices, + * the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + * assigned to the second shard. + *

+ * IMPORTANT: The same point-in-time ID should be used for all slices. If + * different PIT IDs are used, slices can overlap and miss documents. This + * situation can occur because the splitting criterion is based on Lucene + * document IDs, which are not stable across changes to the index. * * @see API * specification @@ -319,8 +342,11 @@ public static SearchRequest of(Function> f } /** - * Indicates which source fields are returned for matching documents. These - * fields are returned in the hits._source property of the search response. + * The source fields that are returned for matching documents. These fields are + * returned in the hits._source property of the search response. If + * the stored_fields property is specified, the + * _source property defaults to false. Otherwise, it + * defaults to true. *

* API name: {@code _source} */ @@ -354,8 +380,13 @@ public final Boolean allowNoIndices() { } /** - * If true, returns partial results if there are shard request timeouts or shard - * failures. If false, returns an error with no partial results. + * If true and there are shard request timeouts or shard failures, + * the request returns partial results. If false, it returns an + * error with no partial results. + *

+ * To override the default behavior, you can set the + * search.default_allow_partial_results cluster setting to + * false. *

* API name: {@code allow_partial_search_results} */ @@ -365,8 +396,9 @@ public final Boolean allowPartialSearchResults() { } /** - * If true, wildcard and prefix queries are analyzed. This parameter can only be - * used when the q query string parameter is specified. + * If true, wildcard and prefix queries are analyzed. This + * parameter can be used only when the q query string parameter is + * specified. *

* API name: {@code analyze_wildcard} */ @@ -376,8 +408,8 @@ public final Boolean analyzeWildcard() { } /** - * Analyzer to use for the query string. This parameter can only be used when - * the q query string parameter is specified. + * The analyzer to use for the query string. This parameter can be used only + * when the q query string parameter is specified. *

* API name: {@code analyzer} */ @@ -388,9 +420,9 @@ public final String analyzer() { /** * The number of shard results that should be reduced at once on the - * coordinating node. This value should be used as a protection mechanism to - * reduce the memory overhead per search request if the potential number of - * shards in the request can be large. + * coordinating node. If the potential number of shards in the request can be + * large, this value should be used as a protection mechanism to reduce the + * memory overhead per search request. *

* API name: {@code batched_reduce_size} */ @@ -400,8 +432,9 @@ public final Long batchedReduceSize() { } /** - * If true, network round-trips between the coordinating node and the remote - * clusters are minimized when executing cross-cluster search (CCS) requests. + * If true, network round-trips between the coordinating node and + * the remote clusters are minimized when running cross-cluster search (CCS) + * requests. *

* API name: {@code ccs_minimize_roundtrips} */ @@ -421,8 +454,9 @@ public final FieldCollapse collapse() { } /** - * The default operator for query string query: AND or OR. This parameter can - * only be used when the q query string parameter is specified. + * The default operator for the query string query: AND or + * OR. This parameter can be used only when the q + * query string parameter is specified. *

* API name: {@code default_operator} */ @@ -432,9 +466,9 @@ public final Operator defaultOperator() { } /** - * Field to use as default where no field prefix is given in the query string. - * This parameter can only be used when the q query string parameter is - * specified. + * The field to use as a default when no field prefix is given in the query + * string. This parameter can be used only when the q query string + * parameter is specified. *

* API name: {@code df} */ @@ -444,9 +478,9 @@ public final String df() { } /** - * Array of wildcard (*) patterns. The request returns doc values - * for field names matching these patterns in the hits.fields - * property of the response. + * An array of wildcard (*) field patterns. The request returns doc + * values for field names matching these patterns in the + * hits.fields property of the response. *

* API name: {@code docvalue_fields} */ @@ -455,9 +489,9 @@ public final List docvalueFields() { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as + * hidden data streams. It supports comma-separated values such as * open,hidden. *

* API name: {@code expand_wildcards} @@ -467,8 +501,8 @@ public final List expandWildcards() { } /** - * If true, returns detailed information about score computation as part of a - * hit. + * If true, the request returns detailed information about score + * computation as part of a hit. *

* API name: {@code explain} */ @@ -487,9 +521,9 @@ public final Map ext() { } /** - * Array of wildcard (*) patterns. The request returns values for - * field names matching these patterns in the hits.fields property - * of the response. + * An array of wildcard (*) field patterns. The request returns + * values for field names matching these patterns in the + * hits.fields property of the response. *

* API name: {@code fields} */ @@ -511,8 +545,8 @@ public final Boolean forceSyntheticSource() { } /** - * Starting document offset. Needs to be non-negative. By default, you cannot - * page through more than 10,000 hits using the from and + * The starting document offset, which must be non-negative. By default, you + * cannot page through more than 10,000 hits using the from and * size parameters. To page through more hits, use the * search_after parameter. *

@@ -539,7 +573,10 @@ public final Highlight highlight() { * when frozen. *

* API name: {@code ignore_throttled} + * + * @deprecated 7.16.0 */ + @Deprecated @Nullable public final Boolean ignoreThrottled() { return this.ignoreThrottled; @@ -557,8 +594,8 @@ public final Boolean ignoreUnavailable() { } /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (*). To search all data streams and indices, + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). To search all data streams and indices, * omit this parameter or use * or _all. *

* API name: {@code index} @@ -568,7 +605,10 @@ public final List index() { } /** - * Boosts the _score of documents from specified indices. + * Boost the _score of documents from specified indices. The boost + * value is the factor by which scores are multiplied. A boost value greater + * than 1.0 increases the score. A boost value between + * 0 and 1.0 decreases the score. *

* API name: {@code indices_boost} */ @@ -577,7 +617,7 @@ public final List> indicesBoost() { } /** - * Defines the approximate kNN search to run. + * The approximate kNN search to run. *

* API name: {@code knn} */ @@ -587,8 +627,8 @@ public final List knn() { /** * If true, format-based query failures (such as providing text to - * a numeric field) in the query string will be ignored. This parameter can only - * be used when the q query string parameter is specified. + * a numeric field) in the query string will be ignored. This parameter can be + * used only when the q query string parameter is specified. *

* API name: {@code lenient} */ @@ -598,7 +638,7 @@ public final Boolean lenient() { } /** - * Defines the number of concurrent shard requests per node this search executes + * The number of concurrent shard requests per node that the search runs * concurrently. This value should be used to limit the impact of the search on * the cluster in order to limit the number of concurrent shard requests. *

@@ -610,8 +650,8 @@ public final Long maxConcurrentShardRequests() { } /** - * Minimum _score for matching documents. Documents with a lower - * _score are not included in the search results. + * The minimum _score for matching documents. Documents with a + * lower _score are not included in the search results. *

* API name: {@code min_score} */ @@ -621,7 +661,7 @@ public final Double minScore() { } /** - * Limits the search to a point in time (PIT). If you provide a PIT, you cannot + * Limit the search to a point in time (PIT). If you provide a PIT, you cannot * specify an <index> in the request path. *

* API name: {@code pit} @@ -644,15 +684,18 @@ public final Query postFilter() { } /** - * Defines a threshold that enforces a pre-filter roundtrip to prefilter search - * shards based on query rewriting if the number of shards the search request - * expands to exceeds the threshold. This filter roundtrip can limit the number - * of shards significantly if for instance a shard can not match any documents + * A threshold that enforces a pre-filter roundtrip to prefilter search shards + * based on query rewriting if the number of shards the search request expands + * to exceeds the threshold. This filter roundtrip can limit the number of + * shards significantly if for instance a shard can not match any documents * based on its rewrite method (if date filters are mandatory to match but the * shard bounds and the query are disjoint). When unspecified, the pre-filter - * phase is executed if any of these conditions is met: the request targets more - * than 128 shards; the request targets one or more read-only index; the primary - * sort of the query targets an indexed field. + * phase is executed if any of these conditions is met: + *

    + *
  • The request targets more than 128 shards.
  • + *
  • The request targets one or more read-only index.
  • + *
  • The primary sort of the query targets an indexed field.
  • + *
*

* API name: {@code pre_filter_shard_size} */ @@ -662,22 +705,29 @@ public final Long preFilterShardSize() { } /** - * Nodes and shards used for the search. By default, Elasticsearch selects from - * eligible nodes and shards using adaptive replica selection, accounting for - * allocation awareness. Valid values are: _only_local to run the - * search only on shards on the local node; _local to, if possible, - * run the search on shards on the local node, or if not, select shards using - * the default method; _only_nodes:<node-id>,<node-id> - * to run the search on only the specified nodes IDs, where, if suitable shards - * exist on more than one selected node, use shards on those nodes using the - * default method, or if none of the specified nodes are available, select - * shards from any available node using the default method; - * _prefer_nodes:<node-id>,<node-id> to if possible, - * run the search on the specified nodes IDs, or if not, select shards using the - * default method; _shards:<shard>,<shard> to run the - * search only on the specified shards; <custom-string> (any - * string that does not start with _) to route searches with the - * same <custom-string> to the same shards in the same order. + * The nodes and shards used for the search. By default, Elasticsearch selects + * from eligible nodes and shards using adaptive replica selection, accounting + * for allocation awareness. Valid values are: + *

    + *
  • _only_local to run the search only on shards on the local + * node.
  • + *
  • _local to, if possible, run the search on shards on the + * local node, or if not, select shards using the default method.
  • + *
  • _only_nodes:<node-id>,<node-id> to run the + * search on only the specified nodes IDs. If suitable shards exist on more than + * one selected node, use shards on those nodes using the default method. If + * none of the specified nodes are available, select shards from any available + * node using the default method.
  • + *
  • _prefer_nodes:<node-id>,<node-id> to if + * possible, run the search on the specified nodes IDs. If not, select shards + * using the default method. _shards:<shard>,<shard> to + * run the search only on the specified shards. You can combine this value with + * other preference values. However, the _shards value + * must come first. For example: _shards:2,3|_local. + * <custom-string> (any string that does not start with + * _) to route searches with the same + * <custom-string> to the same shards in the same order.
  • + *
*

* API name: {@code preference} */ @@ -699,9 +749,12 @@ public final Boolean profile() { } /** - * Query in the Lucene query string syntax using query parameter search. Query - * parameter searches do not support the full Elasticsearch Query DSL but are - * handy for testing. + * A query in the Lucene query string syntax. Query parameter searches do not + * support the full Elasticsearch Query DSL but are handy for testing. + *

+ * IMPORTANT: This parameter overrides the query parameter in the request body. + * If both parameters are specified, documents matching the query request body + * parameter are not returned. *

* API name: {@code q} */ @@ -711,7 +764,7 @@ public final String q() { } /** - * Defines the search definition using the Query DSL. + * The search definition using the Query DSL. *

* API name: {@code query} */ @@ -721,7 +774,7 @@ public final Query query() { } /** - * Defines the Reciprocal Rank Fusion (RRF) to use. + * The Reciprocal Rank Fusion (RRF) to use. *

* API name: {@code rank} */ @@ -732,7 +785,8 @@ public final Rank rank() { /** * If true, the caching of search results is enabled for requests - * where size is 0. Defaults to index level settings. + * where size is 0. It defaults to index level + * settings. *

* API name: {@code request_cache} */ @@ -755,7 +809,7 @@ public final List rescore() { /** * A retriever is a specification to describe top documents returned from a * search. A retriever replaces other elements of the search API that also - * return top documents such as query and knn. + * return top documents such as query and knn. *

* API name: {@code retriever} */ @@ -765,7 +819,7 @@ public final Retriever retriever() { } /** - * Custom value used to route operations to a specific shard. + * A custom value that is used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -775,7 +829,7 @@ public final String routing() { } /** - * Defines one or more runtime fields in the search request. These fields take + * One or more runtime fields in the search request. These fields take * precedence over mapped fields with the same name. *

* API name: {@code runtime_mappings} @@ -794,10 +848,9 @@ public final Map scriptFields() { } /** - * Period to retain the search context for scrolling. See Scroll search results. - * By default, this value cannot exceed 1d (24 hours). You can - * change this limit using the search.max_keep_alive cluster-level - * setting. + * The period to retain the search context for scrolling. By default, this value + * cannot exceed 1d (24 hours). You can change this limit by using + * the search.max_keep_alive cluster-level setting. *

* API name: {@code scroll} */ @@ -817,7 +870,8 @@ public final List searchAfter() { } /** - * How distributed term frequencies are calculated for relevance scoring. + * Indicates how distributed term frequencies are calculated for relevance + * scoring. *

* API name: {@code search_type} */ @@ -827,8 +881,8 @@ public final SearchType searchType() { } /** - * If true, returns sequence number and primary term of the last - * modification of each hit. + * If true, the request returns sequence number and primary term of + * the last modification of each hit. *

* API name: {@code seq_no_primary_term} */ @@ -838,9 +892,10 @@ public final Boolean seqNoPrimaryTerm() { } /** - * The number of hits to return. By default, you cannot page through more than - * 10,000 hits using the from and size parameters. To - * page through more hits, use the search_after parameter. + * The number of hits to return, which must not be negative. By default, you + * cannot page through more than 10,000 hits using the from and + * size parameters. To page through more hits, use the + * search_after property. *

* API name: {@code size} */ @@ -850,8 +905,8 @@ public final Integer size() { } /** - * Can be used to split a scrolled search into multiple slices that can be - * consumed independently. + * Split a scrolled search into multiple slices that can be consumed + * independently. *

* API name: {@code slice} */ @@ -870,9 +925,9 @@ public final List sort() { } /** - * Stats groups to associate with the search. Each group maintains a statistics - * aggregation for its associated searches. You can retrieve these stats using - * the indices stats API. + * The stats groups to associate with the search. Each group maintains a + * statistics aggregation for its associated searches. You can retrieve these + * stats using the indices stats API. *

* API name: {@code stats} */ @@ -881,11 +936,11 @@ public final List stats() { } /** - * List of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, - * the _source parameter defaults to false. You can - * pass _source: true to return both source fields and stored - * fields in the search response. + * A comma-separated list of stored fields to return as part of a hit. If no + * fields are specified, no stored fields are included in the response. If this + * field is specified, the _source property defaults to + * false. You can pass _source: true to return both + * source fields and stored fields in the search response. *

* API name: {@code stored_fields} */ @@ -905,14 +960,16 @@ public final Suggester suggest() { } /** - * Maximum number of documents to collect for each shard. If a query reaches + * The maximum number of documents to collect for each shard. If a query reaches * this limit, Elasticsearch terminates the query early. Elasticsearch collects - * documents before sorting. Use with caution. Elasticsearch applies this - * parameter to each shard handling the request. When possible, let - * Elasticsearch perform early termination automatically. Avoid specifying this - * parameter for requests that target data streams with backing indices across - * multiple data tiers. If set to 0 (default), the query does not - * terminate early. + * documents before sorting. + *

+ * IMPORTANT: Use with caution. Elasticsearch applies this property to each + * shard handling the request. When possible, let Elasticsearch perform early + * termination automatically. Avoid specifying this property for requests that + * target data streams with backing indices across multiple data tiers. + *

+ * If set to 0 (default), the query does not terminate early. *

* API name: {@code terminate_after} */ @@ -922,9 +979,9 @@ public final Long terminateAfter() { } /** - * Specifies the period of time to wait for a response from each shard. If no - * response is received before the timeout expires, the request fails and - * returns an error. Defaults to no timeout. + * The period of time to wait for a response from each shard. If no response is + * received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. *

* API name: {@code timeout} */ @@ -934,8 +991,8 @@ public final String timeout() { } /** - * If true, calculate and return document scores, even if the scores are not - * used for sorting. + * If true, calculate and return document scores, even if the + * scores are not used for sorting. *

* API name: {@code track_scores} */ @@ -958,7 +1015,8 @@ public final TrackHits trackTotalHits() { } /** - * If true, returns document version as part of a hit. + * If true, the request returns the document version as part of a + * hit. *

* API name: {@code version} */ @@ -1410,8 +1468,11 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private Boolean version; /** - * Indicates which source fields are returned for matching documents. These - * fields are returned in the hits._source property of the search response. + * The source fields that are returned for matching documents. These fields are + * returned in the hits._source property of the search response. If + * the stored_fields property is specified, the + * _source property defaults to false. Otherwise, it + * defaults to true. *

* API name: {@code _source} */ @@ -1421,8 +1482,11 @@ public final Builder source(@Nullable SourceConfig value) { } /** - * Indicates which source fields are returned for matching documents. These - * fields are returned in the hits._source property of the search response. + * The source fields that are returned for matching documents. These fields are + * returned in the hits._source property of the search response. If + * the stored_fields property is specified, the + * _source property defaults to false. Otherwise, it + * defaults to true. *

* API name: {@code _source} */ @@ -1481,8 +1545,13 @@ public final Builder allowNoIndices(@Nullable Boolean value) { } /** - * If true, returns partial results if there are shard request timeouts or shard - * failures. If false, returns an error with no partial results. + * If true and there are shard request timeouts or shard failures, + * the request returns partial results. If false, it returns an + * error with no partial results. + *

+ * To override the default behavior, you can set the + * search.default_allow_partial_results cluster setting to + * false. *

* API name: {@code allow_partial_search_results} */ @@ -1492,8 +1561,9 @@ public final Builder allowPartialSearchResults(@Nullable Boolean value) { } /** - * If true, wildcard and prefix queries are analyzed. This parameter can only be - * used when the q query string parameter is specified. + * If true, wildcard and prefix queries are analyzed. This + * parameter can be used only when the q query string parameter is + * specified. *

* API name: {@code analyze_wildcard} */ @@ -1503,8 +1573,8 @@ public final Builder analyzeWildcard(@Nullable Boolean value) { } /** - * Analyzer to use for the query string. This parameter can only be used when - * the q query string parameter is specified. + * The analyzer to use for the query string. This parameter can be used only + * when the q query string parameter is specified. *

* API name: {@code analyzer} */ @@ -1515,9 +1585,9 @@ public final Builder analyzer(@Nullable String value) { /** * The number of shard results that should be reduced at once on the - * coordinating node. This value should be used as a protection mechanism to - * reduce the memory overhead per search request if the potential number of - * shards in the request can be large. + * coordinating node. If the potential number of shards in the request can be + * large, this value should be used as a protection mechanism to reduce the + * memory overhead per search request. *

* API name: {@code batched_reduce_size} */ @@ -1527,8 +1597,9 @@ public final Builder batchedReduceSize(@Nullable Long value) { } /** - * If true, network round-trips between the coordinating node and the remote - * clusters are minimized when executing cross-cluster search (CCS) requests. + * If true, network round-trips between the coordinating node and + * the remote clusters are minimized when running cross-cluster search (CCS) + * requests. *

* API name: {@code ccs_minimize_roundtrips} */ @@ -1557,8 +1628,9 @@ public final Builder collapse(Functionq query string parameter is specified. + * The default operator for the query string query: AND or + * OR. This parameter can be used only when the q + * query string parameter is specified. *

* API name: {@code default_operator} */ @@ -1568,9 +1640,9 @@ public final Builder defaultOperator(@Nullable Operator value) { } /** - * Field to use as default where no field prefix is given in the query string. - * This parameter can only be used when the q query string parameter is - * specified. + * The field to use as a default when no field prefix is given in the query + * string. This parameter can be used only when the q query string + * parameter is specified. *

* API name: {@code df} */ @@ -1580,9 +1652,9 @@ public final Builder df(@Nullable String value) { } /** - * Array of wildcard (*) patterns. The request returns doc values - * for field names matching these patterns in the hits.fields - * property of the response. + * An array of wildcard (*) field patterns. The request returns doc + * values for field names matching these patterns in the + * hits.fields property of the response. *

* API name: {@code docvalue_fields} *

@@ -1594,9 +1666,9 @@ public final Builder docvalueFields(List list) { } /** - * Array of wildcard (*) patterns. The request returns doc values - * for field names matching these patterns in the hits.fields - * property of the response. + * An array of wildcard (*) field patterns. The request returns doc + * values for field names matching these patterns in the + * hits.fields property of the response. *

* API name: {@code docvalue_fields} *

@@ -1608,9 +1680,9 @@ public final Builder docvalueFields(FieldAndFormat value, FieldAndFormat... valu } /** - * Array of wildcard (*) patterns. The request returns doc values - * for field names matching these patterns in the hits.fields - * property of the response. + * An array of wildcard (*) field patterns. The request returns doc + * values for field names matching these patterns in the + * hits.fields property of the response. *

* API name: {@code docvalue_fields} *

@@ -1621,9 +1693,9 @@ public final Builder docvalueFields(Function

* API name: {@code expand_wildcards} @@ -1636,9 +1708,9 @@ public final Builder expandWildcards(List list) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as + * hidden data streams. It supports comma-separated values such as * open,hidden. *

* API name: {@code expand_wildcards} @@ -1651,8 +1723,8 @@ public final Builder expandWildcards(ExpandWildcard value, ExpandWildcard... val } /** - * If true, returns detailed information about score computation as part of a - * hit. + * If true, the request returns detailed information about score + * computation as part of a hit. *

* API name: {@code explain} */ @@ -1686,9 +1758,9 @@ public final Builder ext(String key, JsonData value) { } /** - * Array of wildcard (*) patterns. The request returns values for - * field names matching these patterns in the hits.fields property - * of the response. + * An array of wildcard (*) field patterns. The request returns + * values for field names matching these patterns in the + * hits.fields property of the response. *

* API name: {@code fields} *

@@ -1700,9 +1772,9 @@ public final Builder fields(List list) { } /** - * Array of wildcard (*) patterns. The request returns values for - * field names matching these patterns in the hits.fields property - * of the response. + * An array of wildcard (*) field patterns. The request returns + * values for field names matching these patterns in the + * hits.fields property of the response. *

* API name: {@code fields} *

@@ -1714,9 +1786,9 @@ public final Builder fields(FieldAndFormat value, FieldAndFormat... values) { } /** - * Array of wildcard (*) patterns. The request returns values for - * field names matching these patterns in the hits.fields property - * of the response. + * An array of wildcard (*) field patterns. The request returns + * values for field names matching these patterns in the + * hits.fields property of the response. *

* API name: {@code fields} *

@@ -1740,8 +1812,8 @@ public final Builder forceSyntheticSource(@Nullable Boolean value) { } /** - * Starting document offset. Needs to be non-negative. By default, you cannot - * page through more than 10,000 hits using the from and + * The starting document offset, which must be non-negative. By default, you + * cannot page through more than 10,000 hits using the from and * size parameters. To page through more hits, use the * search_after parameter. *

@@ -1778,7 +1850,10 @@ public final Builder highlight(Function * API name: {@code ignore_throttled} + * + * @deprecated 7.16.0 */ + @Deprecated public final Builder ignoreThrottled(@Nullable Boolean value) { this.ignoreThrottled = value; return this; @@ -1796,8 +1871,8 @@ public final Builder ignoreUnavailable(@Nullable Boolean value) { } /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (*). To search all data streams and indices, + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). To search all data streams and indices, * omit this parameter or use * or _all. *

* API name: {@code index} @@ -1810,8 +1885,8 @@ public final Builder index(List list) { } /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (*). To search all data streams and indices, + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). To search all data streams and indices, * omit this parameter or use * or _all. *

* API name: {@code index} @@ -1824,7 +1899,10 @@ public final Builder index(String value, String... values) { } /** - * Boosts the _score of documents from specified indices. + * Boost the _score of documents from specified indices. The boost + * value is the factor by which scores are multiplied. A boost value greater + * than 1.0 increases the score. A boost value between + * 0 and 1.0 decreases the score. *

* API name: {@code indices_boost} *

@@ -1836,7 +1914,10 @@ public final Builder indicesBoost(List> list) { } /** - * Boosts the _score of documents from specified indices. + * Boost the _score of documents from specified indices. The boost + * value is the factor by which scores are multiplied. A boost value greater + * than 1.0 increases the score. A boost value between + * 0 and 1.0 decreases the score. *

* API name: {@code indices_boost} *

@@ -1848,7 +1929,7 @@ public final Builder indicesBoost(Map value, Map } /** - * Defines the approximate kNN search to run. + * The approximate kNN search to run. *

* API name: {@code knn} *

@@ -1860,7 +1941,7 @@ public final Builder knn(List list) { } /** - * Defines the approximate kNN search to run. + * The approximate kNN search to run. *

* API name: {@code knn} *

@@ -1872,7 +1953,7 @@ public final Builder knn(KnnSearch value, KnnSearch... values) { } /** - * Defines the approximate kNN search to run. + * The approximate kNN search to run. *

* API name: {@code knn} *

@@ -1884,8 +1965,8 @@ public final Builder knn(Function> f /** * If true, format-based query failures (such as providing text to - * a numeric field) in the query string will be ignored. This parameter can only - * be used when the q query string parameter is specified. + * a numeric field) in the query string will be ignored. This parameter can be + * used only when the q query string parameter is specified. *

* API name: {@code lenient} */ @@ -1895,7 +1976,7 @@ public final Builder lenient(@Nullable Boolean value) { } /** - * Defines the number of concurrent shard requests per node this search executes + * The number of concurrent shard requests per node that the search runs * concurrently. This value should be used to limit the impact of the search on * the cluster in order to limit the number of concurrent shard requests. *

@@ -1907,8 +1988,8 @@ public final Builder maxConcurrentShardRequests(@Nullable Long value) { } /** - * Minimum _score for matching documents. Documents with a lower - * _score are not included in the search results. + * The minimum _score for matching documents. Documents with a + * lower _score are not included in the search results. *

* API name: {@code min_score} */ @@ -1918,7 +1999,7 @@ public final Builder minScore(@Nullable Double value) { } /** - * Limits the search to a point in time (PIT). If you provide a PIT, you cannot + * Limit the search to a point in time (PIT). If you provide a PIT, you cannot * specify an <index> in the request path. *

* API name: {@code pit} @@ -1929,7 +2010,7 @@ public final Builder pit(@Nullable PointInTimeReference value) { } /** - * Limits the search to a point in time (PIT). If you provide a PIT, you cannot + * Limit the search to a point in time (PIT). If you provide a PIT, you cannot * specify an <index> in the request path. *

* API name: {@code pit} @@ -1962,15 +2043,18 @@ public final Builder postFilter(Function> fn } /** - * Defines a threshold that enforces a pre-filter roundtrip to prefilter search - * shards based on query rewriting if the number of shards the search request - * expands to exceeds the threshold. This filter roundtrip can limit the number - * of shards significantly if for instance a shard can not match any documents + * A threshold that enforces a pre-filter roundtrip to prefilter search shards + * based on query rewriting if the number of shards the search request expands + * to exceeds the threshold. This filter roundtrip can limit the number of + * shards significantly if for instance a shard can not match any documents * based on its rewrite method (if date filters are mandatory to match but the * shard bounds and the query are disjoint). When unspecified, the pre-filter - * phase is executed if any of these conditions is met: the request targets more - * than 128 shards; the request targets one or more read-only index; the primary - * sort of the query targets an indexed field. + * phase is executed if any of these conditions is met: + *

    + *
  • The request targets more than 128 shards.
  • + *
  • The request targets one or more read-only index.
  • + *
  • The primary sort of the query targets an indexed field.
  • + *
*

* API name: {@code pre_filter_shard_size} */ @@ -1980,22 +2064,29 @@ public final Builder preFilterShardSize(@Nullable Long value) { } /** - * Nodes and shards used for the search. By default, Elasticsearch selects from - * eligible nodes and shards using adaptive replica selection, accounting for - * allocation awareness. Valid values are: _only_local to run the - * search only on shards on the local node; _local to, if possible, - * run the search on shards on the local node, or if not, select shards using - * the default method; _only_nodes:<node-id>,<node-id> - * to run the search on only the specified nodes IDs, where, if suitable shards - * exist on more than one selected node, use shards on those nodes using the - * default method, or if none of the specified nodes are available, select - * shards from any available node using the default method; - * _prefer_nodes:<node-id>,<node-id> to if possible, - * run the search on the specified nodes IDs, or if not, select shards using the - * default method; _shards:<shard>,<shard> to run the - * search only on the specified shards; <custom-string> (any - * string that does not start with _) to route searches with the - * same <custom-string> to the same shards in the same order. + * The nodes and shards used for the search. By default, Elasticsearch selects + * from eligible nodes and shards using adaptive replica selection, accounting + * for allocation awareness. Valid values are: + *

    + *
  • _only_local to run the search only on shards on the local + * node.
  • + *
  • _local to, if possible, run the search on shards on the + * local node, or if not, select shards using the default method.
  • + *
  • _only_nodes:<node-id>,<node-id> to run the + * search on only the specified nodes IDs. If suitable shards exist on more than + * one selected node, use shards on those nodes using the default method. If + * none of the specified nodes are available, select shards from any available + * node using the default method.
  • + *
  • _prefer_nodes:<node-id>,<node-id> to if + * possible, run the search on the specified nodes IDs. If not, select shards + * using the default method. _shards:<shard>,<shard> to + * run the search only on the specified shards. You can combine this value with + * other preference values. However, the _shards value + * must come first. For example: _shards:2,3|_local. + * <custom-string> (any string that does not start with + * _) to route searches with the same + * <custom-string> to the same shards in the same order.
  • + *
*

* API name: {@code preference} */ @@ -2017,9 +2108,12 @@ public final Builder profile(@Nullable Boolean value) { } /** - * Query in the Lucene query string syntax using query parameter search. Query - * parameter searches do not support the full Elasticsearch Query DSL but are - * handy for testing. + * A query in the Lucene query string syntax. Query parameter searches do not + * support the full Elasticsearch Query DSL but are handy for testing. + *

+ * IMPORTANT: This parameter overrides the query parameter in the request body. + * If both parameters are specified, documents matching the query request body + * parameter are not returned. *

* API name: {@code q} */ @@ -2029,7 +2123,7 @@ public final Builder q(@Nullable String value) { } /** - * Defines the search definition using the Query DSL. + * The search definition using the Query DSL. *

* API name: {@code query} */ @@ -2039,7 +2133,7 @@ public final Builder query(@Nullable Query value) { } /** - * Defines the search definition using the Query DSL. + * The search definition using the Query DSL. *

* API name: {@code query} */ @@ -2048,7 +2142,7 @@ public final Builder query(Function> fn) { } /** - * Defines the Reciprocal Rank Fusion (RRF) to use. + * The Reciprocal Rank Fusion (RRF) to use. *

* API name: {@code rank} */ @@ -2058,7 +2152,7 @@ public final Builder rank(@Nullable Rank value) { } /** - * Defines the Reciprocal Rank Fusion (RRF) to use. + * The Reciprocal Rank Fusion (RRF) to use. *

* API name: {@code rank} */ @@ -2068,7 +2162,8 @@ public final Builder rank(Function> fn) { /** * If true, the caching of search results is enabled for requests - * where size is 0. Defaults to index level settings. + * where size is 0. It defaults to index level + * settings. *

* API name: {@code request_cache} */ @@ -2121,7 +2216,7 @@ public final Builder rescore(Function> f /** * A retriever is a specification to describe top documents returned from a * search. A retriever replaces other elements of the search API that also - * return top documents such as query and knn. + * return top documents such as query and knn. *

* API name: {@code retriever} */ @@ -2133,7 +2228,7 @@ public final Builder retriever(@Nullable Retriever value) { /** * A retriever is a specification to describe top documents returned from a * search. A retriever replaces other elements of the search API that also - * return top documents such as query and knn. + * return top documents such as query and knn. *

* API name: {@code retriever} */ @@ -2142,7 +2237,7 @@ public final Builder retriever(Function * API name: {@code routing} */ @@ -2152,7 +2247,7 @@ public final Builder routing(@Nullable String value) { } /** - * Defines one or more runtime fields in the search request. These fields take + * One or more runtime fields in the search request. These fields take * precedence over mapped fields with the same name. *

* API name: {@code runtime_mappings} @@ -2165,7 +2260,7 @@ public final Builder runtimeMappings(Map map) { } /** - * Defines one or more runtime fields in the search request. These fields take + * One or more runtime fields in the search request. These fields take * precedence over mapped fields with the same name. *

* API name: {@code runtime_mappings} @@ -2178,7 +2273,7 @@ public final Builder runtimeMappings(String key, RuntimeField value) { } /** - * Defines one or more runtime fields in the search request. These fields take + * One or more runtime fields in the search request. These fields take * precedence over mapped fields with the same name. *

* API name: {@code runtime_mappings} @@ -2226,10 +2321,9 @@ public final Builder scriptFields(String key, Function1d (24 hours). You can - * change this limit using the search.max_keep_alive cluster-level - * setting. + * The period to retain the search context for scrolling. By default, this value + * cannot exceed 1d (24 hours). You can change this limit by using + * the search.max_keep_alive cluster-level setting. *

* API name: {@code scroll} */ @@ -2239,10 +2333,9 @@ public final Builder scroll(@Nullable Time value) { } /** - * Period to retain the search context for scrolling. See Scroll search results. - * By default, this value cannot exceed 1d (24 hours). You can - * change this limit using the search.max_keep_alive cluster-level - * setting. + * The period to retain the search context for scrolling. By default, this value + * cannot exceed 1d (24 hours). You can change this limit by using + * the search.max_keep_alive cluster-level setting. *

* API name: {@code scroll} */ @@ -2361,7 +2454,8 @@ public final Builder searchAfter(Function * API name: {@code search_type} */ @@ -2371,8 +2465,8 @@ public final Builder searchType(@Nullable SearchType value) { } /** - * If true, returns sequence number and primary term of the last - * modification of each hit. + * If true, the request returns sequence number and primary term of + * the last modification of each hit. *

* API name: {@code seq_no_primary_term} */ @@ -2382,9 +2476,10 @@ public final Builder seqNoPrimaryTerm(@Nullable Boolean value) { } /** - * The number of hits to return. By default, you cannot page through more than - * 10,000 hits using the from and size parameters. To - * page through more hits, use the search_after parameter. + * The number of hits to return, which must not be negative. By default, you + * cannot page through more than 10,000 hits using the from and + * size parameters. To page through more hits, use the + * search_after property. *

* API name: {@code size} */ @@ -2394,8 +2489,8 @@ public final Builder size(@Nullable Integer value) { } /** - * Can be used to split a scrolled search into multiple slices that can be - * consumed independently. + * Split a scrolled search into multiple slices that can be consumed + * independently. *

* API name: {@code slice} */ @@ -2405,8 +2500,8 @@ public final Builder slice(@Nullable SlicedScroll value) { } /** - * Can be used to split a scrolled search into multiple slices that can be - * consumed independently. + * Split a scrolled search into multiple slices that can be consumed + * independently. *

* API name: {@code slice} */ @@ -2450,9 +2545,9 @@ public final Builder sort(Function * API name: {@code stats} *

@@ -2464,9 +2559,9 @@ public final Builder stats(List list) { } /** - * Stats groups to associate with the search. Each group maintains a statistics - * aggregation for its associated searches. You can retrieve these stats using - * the indices stats API. + * The stats groups to associate with the search. Each group maintains a + * statistics aggregation for its associated searches. You can retrieve these + * stats using the indices stats API. *

* API name: {@code stats} *

@@ -2478,11 +2573,11 @@ public final Builder stats(String value, String... values) { } /** - * List of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, - * the _source parameter defaults to false. You can - * pass _source: true to return both source fields and stored - * fields in the search response. + * A comma-separated list of stored fields to return as part of a hit. If no + * fields are specified, no stored fields are included in the response. If this + * field is specified, the _source property defaults to + * false. You can pass _source: true to return both + * source fields and stored fields in the search response. *

* API name: {@code stored_fields} *

@@ -2494,11 +2589,11 @@ public final Builder storedFields(List list) { } /** - * List of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, - * the _source parameter defaults to false. You can - * pass _source: true to return both source fields and stored - * fields in the search response. + * A comma-separated list of stored fields to return as part of a hit. If no + * fields are specified, no stored fields are included in the response. If this + * field is specified, the _source property defaults to + * false. You can pass _source: true to return both + * source fields and stored fields in the search response. *

* API name: {@code stored_fields} *

@@ -2531,14 +2626,16 @@ public final Builder suggest(Function0 (default), the query does not - * terminate early. + * documents before sorting. + *

+ * IMPORTANT: Use with caution. Elasticsearch applies this property to each + * shard handling the request. When possible, let Elasticsearch perform early + * termination automatically. Avoid specifying this property for requests that + * target data streams with backing indices across multiple data tiers. + *

+ * If set to 0 (default), the query does not terminate early. *

* API name: {@code terminate_after} */ @@ -2548,9 +2645,9 @@ public final Builder terminateAfter(@Nullable Long value) { } /** - * Specifies the period of time to wait for a response from each shard. If no - * response is received before the timeout expires, the request fails and - * returns an error. Defaults to no timeout. + * The period of time to wait for a response from each shard. If no response is + * received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. *

* API name: {@code timeout} */ @@ -2560,8 +2657,8 @@ public final Builder timeout(@Nullable String value) { } /** - * If true, calculate and return document scores, even if the scores are not - * used for sorting. + * If true, calculate and return document scores, even if the + * scores are not used for sorting. *

* API name: {@code track_scores} */ @@ -2596,7 +2693,8 @@ public final Builder trackTotalHits(Functiontrue, the request returns the document version as part of a + * hit. *

* API name: {@code version} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchShardsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchShardsRequest.java index 61b64a53c..7814cb784 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchShardsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchShardsRequest.java @@ -65,7 +65,11 @@ * Get the indices and shards that a search request would be run against. This * information can be useful for working out issues or planning optimizations * with routing and shard preferences. When filtered aliases are used, the - * filter is returned as part of the indices section. + * filter is returned as part of the indices section. + *

+ * If the Elasticsearch security features are enabled, you must have the + * view_index_metadata or manage index privilege for + * the target data stream, index, or alias. * * @see API * specification @@ -154,8 +158,9 @@ public final Boolean ignoreUnavailable() { } /** - * Returns the indices and shards that a search request would be executed - * against. + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). To search all data streams and indices, + * omit this parameter or use * or _all. *

* API name: {@code index} */ @@ -175,7 +180,10 @@ public final Boolean local() { } /** - * Period to wait for a connection to the master node. + * The period to wait for a connection to the master node. If the master node is + * not available before the timeout expires, the request fails and returns an + * error. It can also be set to -1 to indicate that the request + * should never timeout. *

* API name: {@code master_timeout} */ @@ -185,7 +193,7 @@ public final Time masterTimeout() { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -196,7 +204,7 @@ public final String preference() { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -299,8 +307,9 @@ public final Builder ignoreUnavailable(@Nullable Boolean value) { } /** - * Returns the indices and shards that a search request would be executed - * against. + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). To search all data streams and indices, + * omit this parameter or use * or _all. *

* API name: {@code index} *

@@ -312,8 +321,9 @@ public final Builder index(List list) { } /** - * Returns the indices and shards that a search request would be executed - * against. + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). To search all data streams and indices, + * omit this parameter or use * or _all. *

* API name: {@code index} *

@@ -336,7 +346,10 @@ public final Builder local(@Nullable Boolean value) { } /** - * Period to wait for a connection to the master node. + * The period to wait for a connection to the master node. If the master node is + * not available before the timeout expires, the request fails and returns an + * error. It can also be set to -1 to indicate that the request + * should never timeout. *

* API name: {@code master_timeout} */ @@ -346,7 +359,10 @@ public final Builder masterTimeout(@Nullable Time value) { } /** - * Period to wait for a connection to the master node. + * The period to wait for a connection to the master node. If the master node is + * not available before the timeout expires, the request fails and returns an + * error. It can also be set to -1 to indicate that the request + * should never timeout. *

* API name: {@code master_timeout} */ @@ -355,7 +371,7 @@ public final Builder masterTimeout(Function> f } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -366,7 +382,7 @@ public final Builder preference(@Nullable String value) { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchTemplateRequest.java index fcf729ccb..73c98d090 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchTemplateRequest.java @@ -166,7 +166,7 @@ public final Boolean ccsMinimizeRoundtrips() { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as * open,hidden. Valid values are: all, @@ -181,7 +181,8 @@ public final List expandWildcards() { /** * If true, returns detailed information about score calculation as - * part of each hit. + * part of each hit. If you specify both this and the explain query + * parameter, the API uses only the query parameter. *

* API name: {@code explain} */ @@ -191,8 +192,8 @@ public final Boolean explain() { } /** - * ID of the search template to use. If no source is specified, this parameter - * is required. + * The ID of the search template to use. If no source is specified, + * this parameter is required. *

* API name: {@code id} */ @@ -206,7 +207,10 @@ public final String id() { * not included in the response when throttled. *

* API name: {@code ignore_throttled} + * + * @deprecated 7.16.0 */ + @Deprecated @Nullable public final Boolean ignoreThrottled() { return this.ignoreThrottled; @@ -224,8 +228,8 @@ public final Boolean ignoreUnavailable() { } /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (*). + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). *

* API name: {@code index} */ @@ -244,7 +248,7 @@ public final Map params() { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -265,7 +269,7 @@ public final Boolean profile() { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -297,8 +301,8 @@ public final SearchType searchType() { /** * An inline search template. Supports the same parameters as the search API's - * request body. Also supports Mustache variables. If no id is specified, this - * parameter is required. + * request body. It also supports Mustache variables. If no id is + * specified, this parameter is required. *

* API name: {@code source} */ @@ -433,7 +437,7 @@ public final Builder ccsMinimizeRoundtrips(@Nullable Boolean value) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as * open,hidden. Valid values are: all, @@ -450,7 +454,7 @@ public final Builder expandWildcards(List list) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as * open,hidden. Valid values are: all, @@ -468,7 +472,8 @@ public final Builder expandWildcards(ExpandWildcard value, ExpandWildcard... val /** * If true, returns detailed information about score calculation as - * part of each hit. + * part of each hit. If you specify both this and the explain query + * parameter, the API uses only the query parameter. *

* API name: {@code explain} */ @@ -478,8 +483,8 @@ public final Builder explain(@Nullable Boolean value) { } /** - * ID of the search template to use. If no source is specified, this parameter - * is required. + * The ID of the search template to use. If no source is specified, + * this parameter is required. *

* API name: {@code id} */ @@ -493,7 +498,10 @@ public final Builder id(@Nullable String value) { * not included in the response when throttled. *

* API name: {@code ignore_throttled} + * + * @deprecated 7.16.0 */ + @Deprecated public final Builder ignoreThrottled(@Nullable Boolean value) { this.ignoreThrottled = value; return this; @@ -511,8 +519,8 @@ public final Builder ignoreUnavailable(@Nullable Boolean value) { } /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (*). + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). *

* API name: {@code index} *

@@ -524,8 +532,8 @@ public final Builder index(List list) { } /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (*). + * A comma-separated list of data streams, indices, and aliases to search. It + * supports wildcards (*). *

* API name: {@code index} *

@@ -563,7 +571,7 @@ public final Builder params(String key, JsonData value) { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -584,7 +592,7 @@ public final Builder profile(@Nullable Boolean value) { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -626,8 +634,8 @@ public final Builder searchType(@Nullable SearchType value) { /** * An inline search template. Supports the same parameters as the search API's - * request body. Also supports Mustache variables. If no id is specified, this - * parameter is required. + * request body. It also supports Mustache variables. If no id is + * specified, this parameter is required. *

* API name: {@code source} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermsEnumRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermsEnumRequest.java index 8e9c8adad..3952ab45e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermsEnumRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermsEnumRequest.java @@ -64,19 +64,16 @@ /** * Get terms in an index. *

- * Discover terms that match a partial string in an index. This "terms - * enum" API is designed for low-latency look-ups used in auto-complete - * scenarios. + * Discover terms that match a partial string in an index. This API is designed + * for low-latency look-ups used in auto-complete scenarios.

*

- * If the complete property in the response is false, the returned - * terms set may be incomplete and should be treated as approximate. This can - * occur due to a few reasons, such as a request timeout or a node error. - *

- * NOTE: The terms enum API may return terms from deleted documents. Deleted + * info The terms enum API may return terms from deleted documents. Deleted * documents are initially only marked as deleted. It is not until their * segments are merged that documents are actually deleted. Until that happens, * the terms enum API will return terms from these documents. - * + *

+ *
+ * * @see API * specification */ @@ -124,8 +121,8 @@ public static TermsEnumRequest of(Functiontrue, the provided search string is matched against index + * terms without case sensitivity. *

* API name: {@code case_insensitive} */ @@ -145,8 +142,10 @@ public final String field() { } /** - * Required - Comma-separated list of data streams, indices, and index aliases - * to search. Wildcard (*) expressions are supported. + * Required - A comma-separated list of data streams, indices, and index aliases + * to search. Wildcard (*) expressions are supported. To search all + * data streams or indices, omit this parameter or use * or + * _all. *

* API name: {@code index} */ @@ -155,7 +154,8 @@ public final String index() { } /** - * Allows to filter an index shard if the provided query rewrites to match_none. + * Filter an index shard if the provided query rewrites to + * match_none. *

* API name: {@code index_filter} */ @@ -165,6 +165,10 @@ public final Query indexFilter() { } /** + * The string after which terms in the index should be returned. It allows for a + * form of pagination if the last result from one request is passed as the + * search_after parameter for a subsequent request. + *

* API name: {@code search_after} */ @Nullable @@ -173,7 +177,7 @@ public final String searchAfter() { } /** - * How many matching terms to return. + * The number of matching terms to return. *

* API name: {@code size} */ @@ -183,9 +187,13 @@ public final Integer size() { } /** - * The string after which terms in the index should be returned. Allows for a - * form of pagination if the last result from one request is passed as the - * search_after parameter for a subsequent request. + * The string to match at the start of indexed terms. If it is not provided, all + * terms in the field are considered.

+ *

+ * info The prefix string cannot be larger than the largest possible keyword + * value, which is Lucene's term byte-length limit of 32766. + *

+ *
*

* API name: {@code string} */ @@ -195,9 +203,9 @@ public final String string() { } /** - * The maximum length of time to spend collecting results. Defaults to - * &quot;1s&quot; (one second). If the timeout is exceeded the complete flag set - * to false in the response and the results may be partial or empty. + * The maximum length of time to spend collecting results. If the timeout is + * exceeded, the complete flag is set to <code>false</code> in the + * response and the results may be partial or empty. *

* API name: {@code timeout} */ @@ -285,8 +293,8 @@ public static class Builder extends RequestBase.AbstractBuilder private Time timeout; /** - * When true the provided search string is matched against index terms without - * case sensitivity. + * When true, the provided search string is matched against index + * terms without case sensitivity. *

* API name: {@code case_insensitive} */ @@ -307,8 +315,10 @@ public final Builder field(String value) { } /** - * Required - Comma-separated list of data streams, indices, and index aliases - * to search. Wildcard (*) expressions are supported. + * Required - A comma-separated list of data streams, indices, and index aliases + * to search. Wildcard (*) expressions are supported. To search all + * data streams or indices, omit this parameter or use * or + * _all. *

* API name: {@code index} */ @@ -318,7 +328,8 @@ public final Builder index(String value) { } /** - * Allows to filter an index shard if the provided query rewrites to match_none. + * Filter an index shard if the provided query rewrites to + * match_none. *

* API name: {@code index_filter} */ @@ -328,7 +339,8 @@ public final Builder indexFilter(@Nullable Query value) { } /** - * Allows to filter an index shard if the provided query rewrites to match_none. + * Filter an index shard if the provided query rewrites to + * match_none. *

* API name: {@code index_filter} */ @@ -337,6 +349,10 @@ public final Builder indexFilter(Function> f } /** + * The string after which terms in the index should be returned. It allows for a + * form of pagination if the last result from one request is passed as the + * search_after parameter for a subsequent request. + *

* API name: {@code search_after} */ public final Builder searchAfter(@Nullable String value) { @@ -345,7 +361,7 @@ public final Builder searchAfter(@Nullable String value) { } /** - * How many matching terms to return. + * The number of matching terms to return. *

* API name: {@code size} */ @@ -355,9 +371,13 @@ public final Builder size(@Nullable Integer value) { } /** - * The string after which terms in the index should be returned. Allows for a - * form of pagination if the last result from one request is passed as the - * search_after parameter for a subsequent request. + * The string to match at the start of indexed terms. If it is not provided, all + * terms in the field are considered.

+ *

+ * info The prefix string cannot be larger than the largest possible keyword + * value, which is Lucene's term byte-length limit of 32766. + *

+ *
*

* API name: {@code string} */ @@ -367,9 +387,9 @@ public final Builder string(@Nullable String value) { } /** - * The maximum length of time to spend collecting results. Defaults to - * &quot;1s&quot; (one second). If the timeout is exceeded the complete flag set - * to false in the response and the results may be partial or empty. + * The maximum length of time to spend collecting results. If the timeout is + * exceeded, the complete flag is set to <code>false</code> in the + * response and the results may be partial or empty. *

* API name: {@code timeout} */ @@ -379,9 +399,9 @@ public final Builder timeout(@Nullable Time value) { } /** - * The maximum length of time to spend collecting results. Defaults to - * &quot;1s&quot; (one second). If the timeout is exceeded the complete flag set - * to false in the response and the results may be partial or empty. + * The maximum length of time to spend collecting results. If the timeout is + * exceeded, the complete flag is set to <code>false</code> in the + * response and the results may be partial or empty. *

* API name: {@code timeout} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermsEnumResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermsEnumResponse.java index 894a2bed5..edee4a04d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermsEnumResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermsEnumResponse.java @@ -97,7 +97,11 @@ public final List terms() { } /** - * Required - API name: {@code complete} + * Required - If false, the returned terms set may be incomplete + * and should be treated as approximate. This can occur due to a few reasons, + * such as a request timeout or a node error. + *

+ * API name: {@code complete} */ public final boolean complete() { return this.complete; @@ -186,7 +190,11 @@ public final Builder terms(String value, String... values) { } /** - * Required - API name: {@code complete} + * Required - If false, the returned terms set may be incomplete + * and should be treated as approximate. This can occur due to a few reasons, + * such as a request timeout or a node error. + *

+ * API name: {@code complete} */ public final Builder complete(boolean value) { this.complete = value; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermvectorsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermvectorsRequest.java index 6aa697c67..8b0b18d8f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermvectorsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermvectorsRequest.java @@ -71,6 +71,56 @@ *

* Get information and statistics about terms in the fields of a particular * document. + *

+ * You can retrieve term vectors for documents stored in the index or for + * artificial documents passed in the body of the request. You can specify the + * fields you are interested in through the fields parameter or by + * adding the fields to the request body. For example: + * + *

+ * GET /my-index-000001/_termvectors/1?fields=message
+ * 
+ * 
+ *

+ * Fields can be specified using wildcards, similar to the multi match query. + *

+ * Term vectors are real-time by default, not near real-time. This can be + * changed by setting realtime parameter to false. + *

+ * You can request three types of values: term information, term + * statistics, and field statistics. By default, all term + * information and field statistics are returned for all fields but term + * statistics are excluded. + *

+ * Term information + *

    + *
  • term frequency in the field (always returned)
  • + *
  • term positions (positions: true)
  • + *
  • start and end offsets (offsets: true)
  • + *
  • term payloads (payloads: true), as base64 encoded bytes
  • + *
+ *

+ * If the requested information wasn't stored in the index, it will be computed + * on the fly if possible. Additionally, term vectors could be computed for + * documents not even existing in the index, but instead provided by the user. + *

+ *

+ * warn Start and end offsets assume UTF-16 encoding is being used. If you want + * to use these offsets in order to get the original text that produced this + * token, you should make sure that the string you are taking a sub-string of is + * also encoded using UTF-16. + *

+ *
+ *

+ * Behaviour + *

+ * The term and field statistics are not accurate. Deleted documents are not + * taken into account. The information is only retrieved for the shard the + * requested document resides in. The term and field statistics are therefore + * only useful as relative measures whereas the absolute numbers have no meaning + * in this context. By default, when requesting term vectors of artificial + * documents, a shard to get the statistics from is randomly selected. Use + * routing only to hit a particular shard. * * @see API * specification @@ -166,8 +216,14 @@ public final TDocument doc() { } /** - * If true, the response includes the document count, sum of - * document frequencies, and sum of total term frequencies. + * If true, the response includes: + *

    + *
  • The document count (how many documents contain this field).
  • + *
  • The sum of document frequencies (the sum of document frequencies for all + * terms in this field).
  • + *
  • The sum of total term frequencies (the sum of total term frequencies of + * each term in this field).
  • + *
*

* API name: {@code field_statistics} */ @@ -177,10 +233,10 @@ public final Boolean fieldStatistics() { } /** - * Comma-separated list or wildcard expressions of fields to include in the - * statistics. Used as the default list unless a specific field list is provided - * in the completion_fields or fielddata_fields - * parameters. + * A comma-separated list or wildcard expressions of fields to include in the + * statistics. It is used as the default list unless a specific field list is + * provided in the completion_fields or + * fielddata_fields parameters. *

* API name: {@code fields} */ @@ -189,7 +245,9 @@ public final List fields() { } /** - * Filter terms based on their tf-idf scores. + * Filter terms based on their tf-idf scores. This could be useful in order to find + * out a good characteristic vector of a document. This feature works in a + * similar manner to the second phase of the More Like This Query. *

* API name: {@code filter} */ @@ -199,7 +257,7 @@ public final Filter filter() { } /** - * Unique identifier of the document. + * A unique identifier for the document. *

* API name: {@code id} */ @@ -209,7 +267,7 @@ public final String id() { } /** - * Required - Name of the index that contains the document. + * Required - The name of the index that contains the document. *

* API name: {@code index} */ @@ -238,7 +296,10 @@ public final Boolean payloads() { } /** - * Overrides the default per-field analyzer. + * Override the default per-field analyzer. This is useful in order to generate + * term vectors in any fashion, especially when using artificial documents. When + * providing an analyzer for a field that already stores term vectors, the term + * vectors will be regenerated. *

* API name: {@code per_field_analyzer} */ @@ -257,7 +318,7 @@ public final Boolean positions() { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -278,7 +339,7 @@ public final Boolean realtime() { } /** - * Custom value used to route operations to a specific shard. + * A custom value that is used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -288,8 +349,15 @@ public final String routing() { } /** - * If true, the response includes term frequency and document - * frequency. + * If true, the response includes: + *

    + *
  • The total term frequency (how often a term occurs in all documents).
  • + *
  • The document frequency (the number of documents containing the current + * term).
  • + *
+ *

+ * By default these values are not returned since term statistics can have a + * serious performance impact. *

* API name: {@code term_statistics} */ @@ -309,7 +377,7 @@ public final Long version() { } /** - * Specific version type. + * The version type. *

* API name: {@code version_type} */ @@ -424,8 +492,14 @@ public final Builder doc(@Nullable TDocument value) { } /** - * If true, the response includes the document count, sum of - * document frequencies, and sum of total term frequencies. + * If true, the response includes: + *

    + *
  • The document count (how many documents contain this field).
  • + *
  • The sum of document frequencies (the sum of document frequencies for all + * terms in this field).
  • + *
  • The sum of total term frequencies (the sum of total term frequencies of + * each term in this field).
  • + *
*

* API name: {@code field_statistics} */ @@ -435,10 +509,10 @@ public final Builder fieldStatistics(@Nullable Boolean value) { } /** - * Comma-separated list or wildcard expressions of fields to include in the - * statistics. Used as the default list unless a specific field list is provided - * in the completion_fields or fielddata_fields - * parameters. + * A comma-separated list or wildcard expressions of fields to include in the + * statistics. It is used as the default list unless a specific field list is + * provided in the completion_fields or + * fielddata_fields parameters. *

* API name: {@code fields} *

@@ -450,10 +524,10 @@ public final Builder fields(List list) { } /** - * Comma-separated list or wildcard expressions of fields to include in the - * statistics. Used as the default list unless a specific field list is provided - * in the completion_fields or fielddata_fields - * parameters. + * A comma-separated list or wildcard expressions of fields to include in the + * statistics. It is used as the default list unless a specific field list is + * provided in the completion_fields or + * fielddata_fields parameters. *

* API name: {@code fields} *

@@ -465,7 +539,9 @@ public final Builder fields(String value, String... values) { } /** - * Filter terms based on their tf-idf scores. + * Filter terms based on their tf-idf scores. This could be useful in order to find + * out a good characteristic vector of a document. This feature works in a + * similar manner to the second phase of the More Like This Query. *

* API name: {@code filter} */ @@ -475,7 +551,9 @@ public final Builder filter(@Nullable Filter value) { } /** - * Filter terms based on their tf-idf scores. + * Filter terms based on their tf-idf scores. This could be useful in order to find + * out a good characteristic vector of a document. This feature works in a + * similar manner to the second phase of the More Like This Query. *

* API name: {@code filter} */ @@ -484,7 +562,7 @@ public final Builder filter(Function * API name: {@code id} */ @@ -494,7 +572,7 @@ public final Builder id(@Nullable String value) { } /** - * Required - Name of the index that contains the document. + * Required - The name of the index that contains the document. *

* API name: {@code index} */ @@ -524,7 +602,10 @@ public final Builder payloads(@Nullable Boolean value) { } /** - * Overrides the default per-field analyzer. + * Override the default per-field analyzer. This is useful in order to generate + * term vectors in any fashion, especially when using artificial documents. When + * providing an analyzer for a field that already stores term vectors, the term + * vectors will be regenerated. *

* API name: {@code per_field_analyzer} *

@@ -536,7 +617,10 @@ public final Builder perFieldAnalyzer(Map map) { } /** - * Overrides the default per-field analyzer. + * Override the default per-field analyzer. This is useful in order to generate + * term vectors in any fashion, especially when using artificial documents. When + * providing an analyzer for a field that already stores term vectors, the term + * vectors will be regenerated. *

* API name: {@code per_field_analyzer} *

@@ -558,7 +642,7 @@ public final Builder positions(@Nullable Boolean value) { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -579,7 +663,7 @@ public final Builder realtime(@Nullable Boolean value) { } /** - * Custom value used to route operations to a specific shard. + * A custom value that is used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -589,8 +673,15 @@ public final Builder routing(@Nullable String value) { } /** - * If true, the response includes term frequency and document - * frequency. + * If true, the response includes: + *

    + *
  • The total term frequency (how often a term occurs in all documents).
  • + *
  • The document frequency (the number of documents containing the current + * term).
  • + *
+ *

+ * By default these values are not returned since term statistics can have a + * serious performance impact. *

* API name: {@code term_statistics} */ @@ -610,7 +701,7 @@ public final Builder version(@Nullable Long value) { } /** - * Specific version type. + * The version type. *

* API name: {@code version_type} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryRequest.java index 869a86813..9beaeb857 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryRequest.java @@ -76,6 +76,139 @@ * query is specified, performs an update on every document in the data stream * or index without modifying the source, which is useful for picking up mapping * changes. + *

+ * If the Elasticsearch security features are enabled, you must have the + * following index privileges for the target data stream, index, or alias: + *

    + *
  • read
  • + *
  • index or write
  • + *
+ *

+ * You can specify the query criteria in the request URI or the request body + * using the same syntax as the search API. + *

+ * When you submit an update by query request, Elasticsearch gets a snapshot of + * the data stream or index when it begins processing the request and updates + * matching documents using internal versioning. When the versions match, the + * document is updated and the version number is incremented. If a document + * changes between the time that the snapshot is taken and the update operation + * is processed, it results in a version conflict and the operation fails. You + * can opt to count version conflicts instead of halting and returning by + * setting conflicts to proceed. Note that if you opt + * to count version conflicts, the operation could attempt to update more + * documents from the source than max_docs until it has + * successfully updated max_docs documents or it has gone through + * every document in the source query. + *

+ * NOTE: Documents with a version equal to 0 cannot be updated using update by + * query because internal versioning does not support 0 as a valid version + * number. + *

+ * While processing an update by query request, Elasticsearch performs multiple + * search requests sequentially to find all of the matching documents. A bulk + * update request is performed for each batch of matching documents. Any query + * or update failures cause the update by query request to fail and the failures + * are shown in the response. Any update requests that completed successfully + * still stick, they are not rolled back. + *

+ * Throttling update requests + *

+ * To control the rate at which update by query issues batches of update + * operations, you can set requests_per_second to any positive + * decimal number. This pads each batch with a wait time to throttle the rate. + * Set requests_per_second to -1 to turn off + * throttling. + *

+ * Throttling uses a wait time between batches so that the internal scroll + * requests can be given a timeout that takes the request padding into account. + * The padding time is the difference between the batch size divided by the + * requests_per_second and the time spent writing. By default the + * batch size is 1000, so if requests_per_second is set to + * 500: + * + *

+ * target_time = 1000 / 500 per second = 2 seconds
+ * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+ * 
+ * 
+ *

+ * Since the batch is issued as a single _bulk request, large batch sizes cause + * Elasticsearch to create many requests and wait before starting the next set. + * This is "bursty" instead of "smooth". + *

+ * Slicing + *

+ * Update by query supports sliced scroll to parallelize the update process. + * This can improve efficiency and provide a convenient way to break the request + * down into smaller parts. + *

+ * Setting slices to auto chooses a reasonable number + * for most data streams and indices. This setting will use one slice per shard, + * up to a certain limit. If there are multiple source data streams or indices, + * it will choose the number of slices based on the index or backing index with + * the smallest number of shards. + *

+ * Adding slices to _update_by_query just automates + * the manual process of creating sub-requests, which means it has some quirks: + *

    + *
  • You can see these requests in the tasks APIs. These sub-requests are + * "child" tasks of the task for the request with slices.
  • + *
  • Fetching the status of the task for the request with slices + * only contains the status of completed slices.
  • + *
  • These sub-requests are individually addressable for things like + * cancellation and rethrottling.
  • + *
  • Rethrottling the request with slices will rethrottle the + * unfinished sub-request proportionally.
  • + *
  • Canceling the request with slices will cancel each sub-request.
  • + *
  • Due to the nature of slices each sub-request won't get a perfectly even + * portion of the documents. All documents will be addressed, but some slices + * may be larger than others. Expect larger slices to have a more even + * distribution.
  • + *
  • Parameters like requests_per_second and + * max_docs on a request with slices are distributed proportionally + * to each sub-request. Combine that with the point above about distribution + * being uneven and you should conclude that using max_docs with + * slices might not result in exactly max_docs + * documents being updated.
  • + *
  • Each sub-request gets a slightly different snapshot of the source data + * stream or index though these are all taken at approximately the same + * time.
  • + *
+ *

+ * If you're slicing manually or otherwise tuning automatic slicing, keep in + * mind that: + *

    + *
  • Query performance is most efficient when the number of slices is equal to + * the number of shards in the index or backing index. If that number is large + * (for example, 500), choose a lower number as too many slices hurts + * performance. Setting slices higher than the number of shards generally does + * not improve efficiency and adds overhead.
  • + *
  • Update performance scales linearly across available resources with the + * number of slices.
  • + *
+ *

+ * Whether query or update performance dominates the runtime depends on the + * documents being reindexed and cluster resources. + *

+ * Update the document source + *

+ * Update by query supports scripts to update the document source. As with the + * update API, you can set ctx.op to change the operation that is + * performed. + *

+ * Set ctx.op = "noop" if your script decides that it + * doesn't have to make any changes. The update by query operation skips + * updating the document and increments the noop counter. + *

+ * Set ctx.op = "delete" if your script decides that the + * document should be deleted. The update by query operation deletes the + * document and increments the deleted counter. + *

+ * Update by query supports only index, noop, and + * delete. Setting ctx.op to anything else is an + * error. Setting any other field in ctx is an error. This API + * enables you to only modify the source of matching documents; you cannot move + * them. * * @see API * specification @@ -245,7 +378,9 @@ public final Boolean allowNoIndices() { } /** - * If true, wildcard and prefix queries are analyzed. + * If true, wildcard and prefix queries are analyzed. This + * parameter can be used only when the q query string parameter is + * specified. *

* API name: {@code analyze_wildcard} */ @@ -255,7 +390,8 @@ public final Boolean analyzeWildcard() { } /** - * Analyzer to use for the query string. + * The analyzer to use for the query string. This parameter can be used only + * when the q query string parameter is specified. *

* API name: {@code analyzer} */ @@ -265,8 +401,8 @@ public final String analyzer() { } /** - * What to do if update by query hits version conflicts: abort or - * proceed. + * The preferred behavior when update by query hits version conflicts: + * abort or proceed. *

* API name: {@code conflicts} */ @@ -277,7 +413,8 @@ public final Conflicts conflicts() { /** * The default operator for query string query: AND or - * OR. + * OR. This parameter can be used only when the q + * query string parameter is specified. *

* API name: {@code default_operator} */ @@ -287,7 +424,9 @@ public final Operator defaultOperator() { } /** - * Field to use as default where no field prefix is given in the query string. + * The field to use as default where no field prefix is given in the query + * string. This parameter can be used only when the q query string + * parameter is specified. *

* API name: {@code df} */ @@ -297,9 +436,9 @@ public final String df() { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as + * hidden data streams. It supports comma-separated values, such as * open,hidden. Valid values are: all, * open, closed, hidden, * none. @@ -332,8 +471,8 @@ public final Boolean ignoreUnavailable() { } /** - * Required - Comma-separated list of data streams, indices, and aliases to - * search. Supports wildcards (*). To search all data streams or + * Required - A comma-separated list of data streams, indices, and aliases to + * search. It supports wildcards (*). To search all data streams or * indices, omit this parameter or use * or _all. *

* API name: {@code index} @@ -344,7 +483,8 @@ public final List index() { /** * If true, format-based query failures (such as providing text to - * a numeric field) in the query string will be ignored. + * a numeric field) in the query string will be ignored. This parameter can be + * used only when the q query string parameter is specified. *

* API name: {@code lenient} */ @@ -364,8 +504,8 @@ public final Long maxDocs() { } /** - * ID of the pipeline to use to preprocess incoming documents. If the index has - * a default ingest pipeline specified, then setting the value to + * The ID of the pipeline to use to preprocess incoming documents. If the index + * has a default ingest pipeline specified, then setting the value to * _none disables the default ingest pipeline for this request. If * a final pipeline is configured it will always run, regardless of the value of * this parameter. @@ -378,7 +518,7 @@ public final String pipeline() { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -389,7 +529,7 @@ public final String preference() { } /** - * Query in the Lucene query string syntax. + * A query in the Lucene query string syntax. *

* API name: {@code q} */ @@ -399,7 +539,7 @@ public final String q() { } /** - * Specifies the documents to update using the Query DSL. + * The documents to update using the Query DSL. *

* API name: {@code query} */ @@ -410,7 +550,9 @@ public final Query query() { /** * If true, Elasticsearch refreshes affected shards to make the - * operation visible to search. + * operation visible to search after the request completes. This is different + * than the update API's refresh parameter, which causes just the + * shard that received the request to be refreshed. *

* API name: {@code refresh} */ @@ -420,7 +562,8 @@ public final Boolean refresh() { } /** - * If true, the request cache is used for this request. + * If true, the request cache is used for this request. It defaults + * to the index-level setting. *

* API name: {@code request_cache} */ @@ -440,7 +583,7 @@ public final Float requestsPerSecond() { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -460,7 +603,7 @@ public final Script script() { } /** - * Period to retain the search context for scrolling. + * The period to retain the search context for scrolling. *

* API name: {@code scroll} */ @@ -470,7 +613,7 @@ public final Time scroll() { } /** - * Size of the scroll request that powers the operation. + * The size of the scroll request that powers the operation. *

* API name: {@code scroll_size} */ @@ -480,7 +623,7 @@ public final Long scrollSize() { } /** - * Explicit timeout for each search request. + * An explicit timeout for each search request. By default, there is no timeout. *

* API name: {@code search_timeout} */ @@ -490,8 +633,8 @@ public final Time searchTimeout() { } /** - * The type of the search operation. Available options: - * query_then_fetch, dfs_query_then_fetch. + * The type of the search operation. Available options include + * query_then_fetch and dfs_query_then_fetch. *

* API name: {@code search_type} */ @@ -531,7 +674,7 @@ public final List sort() { } /** - * Specific tag of the request for logging and statistical + * The specific tag of the request for logging and statistical * purposes. *

* API name: {@code stats} @@ -541,13 +684,14 @@ public final List stats() { } /** - * Maximum number of documents to collect for each shard. If a query reaches + * The maximum number of documents to collect for each shard. If a query reaches * this limit, Elasticsearch terminates the query early. Elasticsearch collects - * documents before sorting. Use with caution. Elasticsearch applies this - * parameter to each shard handling the request. When possible, let - * Elasticsearch perform early termination automatically. Avoid specifying this - * parameter for requests that target data streams with backing indices across - * multiple data tiers. + * documents before sorting. + *

+ * IMPORTANT: Use with caution. Elasticsearch applies this parameter to each + * shard handling the request. When possible, let Elasticsearch perform early + * termination automatically. Avoid specifying this parameter for requests that + * target data streams with backing indices across multiple data tiers. *

* API name: {@code terminate_after} */ @@ -557,8 +701,10 @@ public final Long terminateAfter() { } /** - * Period each update request waits for the following operations: dynamic - * mapping updates, waiting for active shards. + * The period each update request waits for the following operations: dynamic + * mapping updates, waiting for active shards. By default, it is one minute. + * This guarantees Elasticsearch waits for at least the timeout before failing. + * The actual wait time could be longer, particularly when multiple waits occur. *

* API name: {@code timeout} */ @@ -591,7 +737,10 @@ public final Boolean versionType() { /** * The number of shard copies that must be active before proceeding with the * operation. Set to all or any positive integer up to the total - * number of shards in the index (number_of_replicas+1). + * number of shards in the index (number_of_replicas+1). The + * timeout parameter controls how long each write request waits for + * unavailable shards to become available. Both work exactly the way they work + * in the bulk API. *

* API name: {@code wait_for_active_shards} */ @@ -601,7 +750,11 @@ public final WaitForActiveShards waitForActiveShards() { } /** - * If true, the request blocks until the operation is complete. + * If true, the request blocks until the operation is complete. If + * false, Elasticsearch performs some preflight checks, launches + * the request, and returns a task ID that you can use to cancel or get the + * status of the task. Elasticsearch creates a record of this task as a document + * at .tasks/task/${taskId}. *

* API name: {@code wait_for_completion} */ @@ -777,7 +930,9 @@ public final Builder allowNoIndices(@Nullable Boolean value) { } /** - * If true, wildcard and prefix queries are analyzed. + * If true, wildcard and prefix queries are analyzed. This + * parameter can be used only when the q query string parameter is + * specified. *

* API name: {@code analyze_wildcard} */ @@ -787,7 +942,8 @@ public final Builder analyzeWildcard(@Nullable Boolean value) { } /** - * Analyzer to use for the query string. + * The analyzer to use for the query string. This parameter can be used only + * when the q query string parameter is specified. *

* API name: {@code analyzer} */ @@ -797,8 +953,8 @@ public final Builder analyzer(@Nullable String value) { } /** - * What to do if update by query hits version conflicts: abort or - * proceed. + * The preferred behavior when update by query hits version conflicts: + * abort or proceed. *

* API name: {@code conflicts} */ @@ -809,7 +965,8 @@ public final Builder conflicts(@Nullable Conflicts value) { /** * The default operator for query string query: AND or - * OR. + * OR. This parameter can be used only when the q + * query string parameter is specified. *

* API name: {@code default_operator} */ @@ -819,7 +976,9 @@ public final Builder defaultOperator(@Nullable Operator value) { } /** - * Field to use as default where no field prefix is given in the query string. + * The field to use as default where no field prefix is given in the query + * string. This parameter can be used only when the q query string + * parameter is specified. *

* API name: {@code df} */ @@ -829,9 +988,9 @@ public final Builder df(@Nullable String value) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as + * hidden data streams. It supports comma-separated values, such as * open,hidden. Valid values are: all, * open, closed, hidden, * none. @@ -846,9 +1005,9 @@ public final Builder expandWildcards(List list) { } /** - * Type of index that wildcard patterns can match. If the request can target + * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match - * hidden data streams. Supports comma-separated values, such as + * hidden data streams. It supports comma-separated values, such as * open,hidden. Valid values are: all, * open, closed, hidden, * none. @@ -884,8 +1043,8 @@ public final Builder ignoreUnavailable(@Nullable Boolean value) { } /** - * Required - Comma-separated list of data streams, indices, and aliases to - * search. Supports wildcards (*). To search all data streams or + * Required - A comma-separated list of data streams, indices, and aliases to + * search. It supports wildcards (*). To search all data streams or * indices, omit this parameter or use * or _all. *

* API name: {@code index} @@ -898,8 +1057,8 @@ public final Builder index(List list) { } /** - * Required - Comma-separated list of data streams, indices, and aliases to - * search. Supports wildcards (*). To search all data streams or + * Required - A comma-separated list of data streams, indices, and aliases to + * search. It supports wildcards (*). To search all data streams or * indices, omit this parameter or use * or _all. *

* API name: {@code index} @@ -913,7 +1072,8 @@ public final Builder index(String value, String... values) { /** * If true, format-based query failures (such as providing text to - * a numeric field) in the query string will be ignored. + * a numeric field) in the query string will be ignored. This parameter can be + * used only when the q query string parameter is specified. *

* API name: {@code lenient} */ @@ -933,8 +1093,8 @@ public final Builder maxDocs(@Nullable Long value) { } /** - * ID of the pipeline to use to preprocess incoming documents. If the index has - * a default ingest pipeline specified, then setting the value to + * The ID of the pipeline to use to preprocess incoming documents. If the index + * has a default ingest pipeline specified, then setting the value to * _none disables the default ingest pipeline for this request. If * a final pipeline is configured it will always run, regardless of the value of * this parameter. @@ -947,7 +1107,7 @@ public final Builder pipeline(@Nullable String value) { } /** - * Specifies the node or shard the operation should be performed on. Random by + * The node or shard the operation should be performed on. It is random by * default. *

* API name: {@code preference} @@ -958,7 +1118,7 @@ public final Builder preference(@Nullable String value) { } /** - * Query in the Lucene query string syntax. + * A query in the Lucene query string syntax. *

* API name: {@code q} */ @@ -968,7 +1128,7 @@ public final Builder q(@Nullable String value) { } /** - * Specifies the documents to update using the Query DSL. + * The documents to update using the Query DSL. *

* API name: {@code query} */ @@ -978,7 +1138,7 @@ public final Builder query(@Nullable Query value) { } /** - * Specifies the documents to update using the Query DSL. + * The documents to update using the Query DSL. *

* API name: {@code query} */ @@ -988,7 +1148,9 @@ public final Builder query(Function> fn) { /** * If true, Elasticsearch refreshes affected shards to make the - * operation visible to search. + * operation visible to search after the request completes. This is different + * than the update API's refresh parameter, which causes just the + * shard that received the request to be refreshed. *

* API name: {@code refresh} */ @@ -998,7 +1160,8 @@ public final Builder refresh(@Nullable Boolean value) { } /** - * If true, the request cache is used for this request. + * If true, the request cache is used for this request. It defaults + * to the index-level setting. *

* API name: {@code request_cache} */ @@ -1018,7 +1181,7 @@ public final Builder requestsPerSecond(@Nullable Float value) { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -1047,7 +1210,7 @@ public final Builder script(Function> fn) } /** - * Period to retain the search context for scrolling. + * The period to retain the search context for scrolling. *

* API name: {@code scroll} */ @@ -1057,7 +1220,7 @@ public final Builder scroll(@Nullable Time value) { } /** - * Period to retain the search context for scrolling. + * The period to retain the search context for scrolling. *

* API name: {@code scroll} */ @@ -1066,7 +1229,7 @@ public final Builder scroll(Function> fn) { } /** - * Size of the scroll request that powers the operation. + * The size of the scroll request that powers the operation. *

* API name: {@code scroll_size} */ @@ -1076,7 +1239,7 @@ public final Builder scrollSize(@Nullable Long value) { } /** - * Explicit timeout for each search request. + * An explicit timeout for each search request. By default, there is no timeout. *

* API name: {@code search_timeout} */ @@ -1086,7 +1249,7 @@ public final Builder searchTimeout(@Nullable Time value) { } /** - * Explicit timeout for each search request. + * An explicit timeout for each search request. By default, there is no timeout. *

* API name: {@code search_timeout} */ @@ -1095,8 +1258,8 @@ public final Builder searchTimeout(Function> f } /** - * The type of the search operation. Available options: - * query_then_fetch, dfs_query_then_fetch. + * The type of the search operation. Available options include + * query_then_fetch and dfs_query_then_fetch. *

* API name: {@code search_type} */ @@ -1170,7 +1333,7 @@ public final Builder sort(String value, String... values) { } /** - * Specific tag of the request for logging and statistical + * The specific tag of the request for logging and statistical * purposes. *

* API name: {@code stats} @@ -1183,7 +1346,7 @@ public final Builder stats(List list) { } /** - * Specific tag of the request for logging and statistical + * The specific tag of the request for logging and statistical * purposes. *

* API name: {@code stats} @@ -1196,13 +1359,14 @@ public final Builder stats(String value, String... values) { } /** - * Maximum number of documents to collect for each shard. If a query reaches + * The maximum number of documents to collect for each shard. If a query reaches * this limit, Elasticsearch terminates the query early. Elasticsearch collects - * documents before sorting. Use with caution. Elasticsearch applies this - * parameter to each shard handling the request. When possible, let - * Elasticsearch perform early termination automatically. Avoid specifying this - * parameter for requests that target data streams with backing indices across - * multiple data tiers. + * documents before sorting. + *

+ * IMPORTANT: Use with caution. Elasticsearch applies this parameter to each + * shard handling the request. When possible, let Elasticsearch perform early + * termination automatically. Avoid specifying this parameter for requests that + * target data streams with backing indices across multiple data tiers. *

* API name: {@code terminate_after} */ @@ -1212,8 +1376,10 @@ public final Builder terminateAfter(@Nullable Long value) { } /** - * Period each update request waits for the following operations: dynamic - * mapping updates, waiting for active shards. + * The period each update request waits for the following operations: dynamic + * mapping updates, waiting for active shards. By default, it is one minute. + * This guarantees Elasticsearch waits for at least the timeout before failing. + * The actual wait time could be longer, particularly when multiple waits occur. *

* API name: {@code timeout} */ @@ -1223,8 +1389,10 @@ public final Builder timeout(@Nullable Time value) { } /** - * Period each update request waits for the following operations: dynamic - * mapping updates, waiting for active shards. + * The period each update request waits for the following operations: dynamic + * mapping updates, waiting for active shards. By default, it is one minute. + * This guarantees Elasticsearch waits for at least the timeout before failing. + * The actual wait time could be longer, particularly when multiple waits occur. *

* API name: {@code timeout} */ @@ -1256,7 +1424,10 @@ public final Builder versionType(@Nullable Boolean value) { /** * The number of shard copies that must be active before proceeding with the * operation. Set to all or any positive integer up to the total - * number of shards in the index (number_of_replicas+1). + * number of shards in the index (number_of_replicas+1). The + * timeout parameter controls how long each write request waits for + * unavailable shards to become available. Both work exactly the way they work + * in the bulk API. *

* API name: {@code wait_for_active_shards} */ @@ -1268,7 +1439,10 @@ public final Builder waitForActiveShards(@Nullable WaitForActiveShards value) { /** * The number of shard copies that must be active before proceeding with the * operation. Set to all or any positive integer up to the total - * number of shards in the index (number_of_replicas+1). + * number of shards in the index (number_of_replicas+1). The + * timeout parameter controls how long each write request waits for + * unavailable shards to become available. Both work exactly the way they work + * in the bulk API. *

* API name: {@code wait_for_active_shards} */ @@ -1278,7 +1452,11 @@ public final Builder waitForActiveShards( } /** - * If true, the request blocks until the operation is complete. + * If true, the request blocks until the operation is complete. If + * false, Elasticsearch performs some preflight checks, launches + * the request, and returns a task ID that you can use to cancel or get the + * status of the task. Elasticsearch creates a record of this task as a document + * at .tasks/task/${taskId}. *

* API name: {@code wait_for_completion} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryResponse.java index bdc232667..c2fd641b3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryResponse.java @@ -142,6 +142,8 @@ public static UpdateByQueryResponse of(Function * API name: {@code batches} */ @Nullable @@ -150,6 +152,13 @@ public final Long batches() { } /** + * Array of failures if there were any unrecoverable errors during the process. + * If this is non-empty then the request ended because of those failures. Update + * by query is implemented using batches. Any failure causes the entire process + * to end, but all failures in the current batch are collected into the array. + * You can use the conflicts option to prevent reindex from ending + * when version conflicts occur. + *

* API name: {@code failures} */ public final List failures() { @@ -157,6 +166,9 @@ public final List failures() { } /** + * The number of documents that were ignored because the script used for the + * update by query returned a noop value for ctx.op. + *

* API name: {@code noops} */ @Nullable @@ -165,6 +177,8 @@ public final Long noops() { } /** + * The number of documents that were successfully deleted. + *

* API name: {@code deleted} */ @Nullable @@ -173,6 +187,8 @@ public final Long deleted() { } /** + * The number of requests per second effectively run during the update by query. + *

* API name: {@code requests_per_second} */ @Nullable @@ -181,6 +197,10 @@ public final Float requestsPerSecond() { } /** + * The number of retries attempted by update by query. bulk is the + * number of bulk actions retried. search is the number of search + * actions retried. + *

* API name: {@code retries} */ @Nullable @@ -197,6 +217,8 @@ public final String task() { } /** + * If true, some requests timed out during the update by query. + *

* API name: {@code timed_out} */ @Nullable @@ -205,6 +227,8 @@ public final Boolean timedOut() { } /** + * The number of milliseconds from start to end of the whole operation. + *

* API name: {@code took} */ @Nullable @@ -213,6 +237,8 @@ public final Long took() { } /** + * The number of documents that were successfully processed. + *

* API name: {@code total} */ @Nullable @@ -221,6 +247,8 @@ public final Long total() { } /** + * The number of documents that were successfully updated. + *

* API name: {@code updated} */ @Nullable @@ -229,6 +257,8 @@ public final Long updated() { } /** + * The number of version conflicts that the update by query hit. + *

* API name: {@code version_conflicts} */ @Nullable @@ -245,6 +275,9 @@ public final Time throttled() { } /** + * The number of milliseconds the request slept to conform to + * requests_per_second. + *

* API name: {@code throttled_millis} */ @Nullable @@ -261,6 +294,11 @@ public final Time throttledUntil() { } /** + * This field should always be equal to zero in an _update_by_query response. It + * only has meaning when using the task API, where it indicates the next time + * (in milliseconds since epoch) a throttled request will be run again in order + * to conform to requests_per_second. + *

* API name: {@code throttled_until_millis} */ @Nullable @@ -430,6 +468,8 @@ public static class Builder extends WithJsonObjectBuilderBase private Long throttledUntilMillis; /** + * The number of scroll responses pulled back by the update by query. + *

* API name: {@code batches} */ public final Builder batches(@Nullable Long value) { @@ -438,6 +478,13 @@ public final Builder batches(@Nullable Long value) { } /** + * Array of failures if there were any unrecoverable errors during the process. + * If this is non-empty then the request ended because of those failures. Update + * by query is implemented using batches. Any failure causes the entire process + * to end, but all failures in the current batch are collected into the array. + * You can use the conflicts option to prevent reindex from ending + * when version conflicts occur. + *

* API name: {@code failures} *

* Adds all elements of list to failures. @@ -448,6 +495,13 @@ public final Builder failures(List list) { } /** + * Array of failures if there were any unrecoverable errors during the process. + * If this is non-empty then the request ended because of those failures. Update + * by query is implemented using batches. Any failure causes the entire process + * to end, but all failures in the current batch are collected into the array. + * You can use the conflicts option to prevent reindex from ending + * when version conflicts occur. + *

* API name: {@code failures} *

* Adds one or more values to failures. @@ -458,6 +512,13 @@ public final Builder failures(BulkIndexByScrollFailure value, BulkIndexByScrollF } /** + * Array of failures if there were any unrecoverable errors during the process. + * If this is non-empty then the request ended because of those failures. Update + * by query is implemented using batches. Any failure causes the entire process + * to end, but all failures in the current batch are collected into the array. + * You can use the conflicts option to prevent reindex from ending + * when version conflicts occur. + *

* API name: {@code failures} *

* Adds a value to failures using a builder lambda. @@ -468,6 +529,9 @@ public final Builder failures( } /** + * The number of documents that were ignored because the script used for the + * update by query returned a noop value for ctx.op. + *

* API name: {@code noops} */ public final Builder noops(@Nullable Long value) { @@ -476,6 +540,8 @@ public final Builder noops(@Nullable Long value) { } /** + * The number of documents that were successfully deleted. + *

* API name: {@code deleted} */ public final Builder deleted(@Nullable Long value) { @@ -484,6 +550,8 @@ public final Builder deleted(@Nullable Long value) { } /** + * The number of requests per second effectively run during the update by query. + *

* API name: {@code requests_per_second} */ public final Builder requestsPerSecond(@Nullable Float value) { @@ -492,6 +560,10 @@ public final Builder requestsPerSecond(@Nullable Float value) { } /** + * The number of retries attempted by update by query. bulk is the + * number of bulk actions retried. search is the number of search + * actions retried. + *

* API name: {@code retries} */ public final Builder retries(@Nullable Retries value) { @@ -500,6 +572,10 @@ public final Builder retries(@Nullable Retries value) { } /** + * The number of retries attempted by update by query. bulk is the + * number of bulk actions retried. search is the number of search + * actions retried. + *

* API name: {@code retries} */ public final Builder retries(Function> fn) { @@ -515,6 +591,8 @@ public final Builder task(@Nullable String value) { } /** + * If true, some requests timed out during the update by query. + *

* API name: {@code timed_out} */ public final Builder timedOut(@Nullable Boolean value) { @@ -523,6 +601,8 @@ public final Builder timedOut(@Nullable Boolean value) { } /** + * The number of milliseconds from start to end of the whole operation. + *

* API name: {@code took} */ public final Builder took(@Nullable Long value) { @@ -531,6 +611,8 @@ public final Builder took(@Nullable Long value) { } /** + * The number of documents that were successfully processed. + *

* API name: {@code total} */ public final Builder total(@Nullable Long value) { @@ -539,6 +621,8 @@ public final Builder total(@Nullable Long value) { } /** + * The number of documents that were successfully updated. + *

* API name: {@code updated} */ public final Builder updated(@Nullable Long value) { @@ -547,6 +631,8 @@ public final Builder updated(@Nullable Long value) { } /** + * The number of version conflicts that the update by query hit. + *

* API name: {@code version_conflicts} */ public final Builder versionConflicts(@Nullable Long value) { @@ -570,6 +656,9 @@ public final Builder throttled(Function> fn) { } /** + * The number of milliseconds the request slept to conform to + * requests_per_second. + *

* API name: {@code throttled_millis} */ public final Builder throttledMillis(@Nullable Long value) { @@ -593,6 +682,11 @@ public final Builder throttledUntil(Function> } /** + * This field should always be equal to zero in an _update_by_query response. It + * only has meaning when using the task API, where it indicates the next time + * (in milliseconds since epoch) a throttled request will be run again in order + * to conform to requests_per_second. + *

* API name: {@code throttled_until_millis} */ public final Builder throttledUntilMillis(@Nullable Long value) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryRethrottleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryRethrottleRequest.java index b6ee9aba7..d2db64b21 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryRethrottleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryRethrottleRequest.java @@ -89,7 +89,8 @@ public static UpdateByQueryRethrottleRequest of( } /** - * The throttle for this request in sub-requests per second. + * The throttle for this request in sub-requests per second. To turn off + * throttling, set it to -1. *

* API name: {@code requests_per_second} */ @@ -122,7 +123,8 @@ public static class Builder extends RequestBase.AbstractBuilder private String taskId; /** - * The throttle for this request in sub-requests per second. + * The throttle for this request in sub-requests per second. To turn off + * throttling, set it to -1. *

* API name: {@code requests_per_second} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateRequest.java index 205755afa..4a2017e3f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateRequest.java @@ -69,8 +69,33 @@ // typedef: _global.update.Request /** - * Update a document. Updates a document by running a script or passing a - * partial document. + * Update a document. + *

+ * Update a document by running a script or passing a partial document. + *

+ * If the Elasticsearch security features are enabled, you must have the + * index or write index privilege for the target index + * or index alias. + *

+ * The script can update, delete, or skip modifying the document. The API also + * supports passing a partial document, which is merged into the existing + * document. To fully replace an existing document, use the index API. This + * operation: + *

    + *
  • Gets the document (collocated with the shard) from the index.
  • + *
  • Runs the specified script.
  • + *
  • Indexes the result.
  • + *
+ *

+ * The document must still be reindexed, but using this API removes some network + * roundtrips and reduces chances of version conflicts between the GET and the + * index operation. + *

+ * The _source field must be enabled to use this API. In addition + * to _source, you can access the following variables through the + * ctx map: _index, _type, + * _id, _version, _routing, and + * _now (the current timestamp). * * @see API * specification @@ -168,7 +193,7 @@ public static UpdateRequestfalse, turn off source retrieval. You can also specify a * comma-separated list of the fields you want to retrieve. *

* API name: {@code _source} @@ -179,8 +204,8 @@ public final SourceConfig source() { } /** - * Set to false to disable setting 'result' in the response to 'noop' if no - * change to the document occurred. + * If true, the result in the response is set to + * noop (no operation) when there are no changes to the document. *

* API name: {@code detect_noop} */ @@ -190,7 +215,8 @@ public final Boolean detectNoop() { } /** - * A partial update to an existing document. + * A partial update to an existing document. If both doc and + * script are specified, doc is ignored. *

* API name: {@code doc} */ @@ -200,7 +226,9 @@ public final TPartialDocument doc() { } /** - * Set to true to use the contents of 'doc' as the value of 'upsert' + * If true, use the contents of 'doc' as the value of 'upsert'. + * NOTE: Using ingest pipelines with doc_as_upsert is not + * supported. *

* API name: {@code doc_as_upsert} */ @@ -210,7 +238,7 @@ public final Boolean docAsUpsert() { } /** - * Required - Document ID + * Required - A unique identifier for the document to be updated. *

* API name: {@code id} */ @@ -239,7 +267,8 @@ public final Long ifSeqNo() { } /** - * Required - The name of the index + * Required - The name of the target index. By default, the index is created + * automatically if it doesn't exist. *

* API name: {@code index} */ @@ -259,8 +288,8 @@ public final String lang() { /** * If 'true', Elasticsearch refreshes the affected shards to make this operation - * visible to search, if 'wait_for' then wait for a refresh to make this - * operation visible to search, if 'false' do nothing with refreshes. + * visible to search. If 'wait_for', it waits for a refresh to make this + * operation visible to search. If 'false', it does nothing with refreshes. *

* API name: {@code refresh} */ @@ -270,7 +299,7 @@ public final Refresh refresh() { } /** - * If true, the destination must be an index alias. + * If true, the destination must be an index alias. *

* API name: {@code require_alias} */ @@ -280,8 +309,7 @@ public final Boolean requireAlias() { } /** - * Specify how many times should the operation be retried when a conflict - * occurs. + * The number of times the operation should be retried when a conflict occurs. *

* API name: {@code retry_on_conflict} */ @@ -291,7 +319,7 @@ public final Integer retryOnConflict() { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -301,7 +329,7 @@ public final String routing() { } /** - * Script to execute to update the document. + * The script to run to update the document. *

* API name: {@code script} */ @@ -311,7 +339,7 @@ public final Script script() { } /** - * Set to true to execute the script whether or not the document exists. + * If true, run the script whether or not the document exists. *

* API name: {@code scripted_upsert} */ @@ -321,9 +349,10 @@ public final Boolean scriptedUpsert() { } /** - * Period to wait for dynamic mapping updates and active shards. This guarantees - * Elasticsearch waits for at least the timeout before failing. The actual wait - * time could be longer, particularly when multiple waits occur. + * The period to wait for the following operations: dynamic mapping updates and + * waiting for active shards. Elasticsearch waits for at least the timeout + * period before failing. The actual wait time could be longer, particularly + * when multiple waits occur. *

* API name: {@code timeout} */ @@ -334,7 +363,7 @@ public final Time timeout() { /** * If the document does not already exist, the contents of 'upsert' are inserted - * as a new document. If the document exists, the 'script' is executed. + * as a new document. If the document exists, the 'script' is run. *

* API name: {@code upsert} */ @@ -344,10 +373,10 @@ public final TDocument upsert() { } /** - * The number of shard copies that must be active before proceeding with the - * operations. Set to 'all' or any positive integer up to the total number of - * shards in the index (number_of_replicas+1). Defaults to 1 meaning the primary - * shard. + * The number of copies of each shard that must be active before proceeding with + * the operation. Set to 'all' or any positive integer up to the total number of + * shards in the index (number_of_replicas+1). The default value of + * 1 means it waits for each primary shard to be active. *

* API name: {@code wait_for_active_shards} */ @@ -475,7 +504,7 @@ public static class Builder private JsonpSerializer tPartialDocumentSerializer; /** - * Set to false to disable source retrieval. You can also specify a + * If false, turn off source retrieval. You can also specify a * comma-separated list of the fields you want to retrieve. *

* API name: {@code _source} @@ -486,7 +515,7 @@ public final Builder source(@Nullable SourceConfig } /** - * Set to false to disable source retrieval. You can also specify a + * If false, turn off source retrieval. You can also specify a * comma-separated list of the fields you want to retrieve. *

* API name: {@code _source} @@ -497,8 +526,8 @@ public final Builder source( } /** - * Set to false to disable setting 'result' in the response to 'noop' if no - * change to the document occurred. + * If true, the result in the response is set to + * noop (no operation) when there are no changes to the document. *

* API name: {@code detect_noop} */ @@ -508,7 +537,8 @@ public final Builder detectNoop(@Nullable Boolean v } /** - * A partial update to an existing document. + * A partial update to an existing document. If both doc and + * script are specified, doc is ignored. *

* API name: {@code doc} */ @@ -518,7 +548,9 @@ public final Builder doc(@Nullable TPartialDocument } /** - * Set to true to use the contents of 'doc' as the value of 'upsert' + * If true, use the contents of 'doc' as the value of 'upsert'. + * NOTE: Using ingest pipelines with doc_as_upsert is not + * supported. *

* API name: {@code doc_as_upsert} */ @@ -528,7 +560,7 @@ public final Builder docAsUpsert(@Nullable Boolean } /** - * Required - Document ID + * Required - A unique identifier for the document to be updated. *

* API name: {@code id} */ @@ -558,7 +590,8 @@ public final Builder ifSeqNo(@Nullable Long value) } /** - * Required - The name of the index + * Required - The name of the target index. By default, the index is created + * automatically if it doesn't exist. *

* API name: {@code index} */ @@ -579,8 +612,8 @@ public final Builder lang(@Nullable String value) { /** * If 'true', Elasticsearch refreshes the affected shards to make this operation - * visible to search, if 'wait_for' then wait for a refresh to make this - * operation visible to search, if 'false' do nothing with refreshes. + * visible to search. If 'wait_for', it waits for a refresh to make this + * operation visible to search. If 'false', it does nothing with refreshes. *

* API name: {@code refresh} */ @@ -590,7 +623,7 @@ public final Builder refresh(@Nullable Refresh valu } /** - * If true, the destination must be an index alias. + * If true, the destination must be an index alias. *

* API name: {@code require_alias} */ @@ -600,8 +633,7 @@ public final Builder requireAlias(@Nullable Boolean } /** - * Specify how many times should the operation be retried when a conflict - * occurs. + * The number of times the operation should be retried when a conflict occurs. *

* API name: {@code retry_on_conflict} */ @@ -611,7 +643,7 @@ public final Builder retryOnConflict(@Nullable Inte } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -621,7 +653,7 @@ public final Builder routing(@Nullable String value } /** - * Script to execute to update the document. + * The script to run to update the document. *

* API name: {@code script} */ @@ -631,7 +663,7 @@ public final Builder script(@Nullable Script value) } /** - * Script to execute to update the document. + * The script to run to update the document. *

* API name: {@code script} */ @@ -640,7 +672,7 @@ public final Builder script(Functiontrue, run the script whether or not the document exists. *

* API name: {@code scripted_upsert} */ @@ -650,9 +682,10 @@ public final Builder scriptedUpsert(@Nullable Boole } /** - * Period to wait for dynamic mapping updates and active shards. This guarantees - * Elasticsearch waits for at least the timeout before failing. The actual wait - * time could be longer, particularly when multiple waits occur. + * The period to wait for the following operations: dynamic mapping updates and + * waiting for active shards. Elasticsearch waits for at least the timeout + * period before failing. The actual wait time could be longer, particularly + * when multiple waits occur. *

* API name: {@code timeout} */ @@ -662,9 +695,10 @@ public final Builder timeout(@Nullable Time value) } /** - * Period to wait for dynamic mapping updates and active shards. This guarantees - * Elasticsearch waits for at least the timeout before failing. The actual wait - * time could be longer, particularly when multiple waits occur. + * The period to wait for the following operations: dynamic mapping updates and + * waiting for active shards. Elasticsearch waits for at least the timeout + * period before failing. The actual wait time could be longer, particularly + * when multiple waits occur. *

* API name: {@code timeout} */ @@ -674,7 +708,7 @@ public final Builder timeout(Function * API name: {@code upsert} */ @@ -684,10 +718,10 @@ public final Builder upsert(@Nullable TDocument val } /** - * The number of shard copies that must be active before proceeding with the - * operations. Set to 'all' or any positive integer up to the total number of - * shards in the index (number_of_replicas+1). Defaults to 1 meaning the primary - * shard. + * The number of copies of each shard that must be active before proceeding with + * the operation. Set to 'all' or any positive integer up to the total number of + * shards in the index (number_of_replicas+1). The default value of + * 1 means it waits for each primary shard to be active. *

* API name: {@code wait_for_active_shards} */ @@ -697,10 +731,10 @@ public final Builder waitForActiveShards(@Nullable } /** - * The number of shard copies that must be active before proceeding with the - * operations. Set to 'all' or any positive integer up to the total number of - * shards in the index (number_of_replicas+1). Defaults to 1 meaning the primary - * shard. + * The number of copies of each shard that must be active before proceeding with + * the operation. Set to 'all' or any positive integer up to the total number of + * shards in the index (number_of_replicas+1). The default value of + * 1 means it waits for each primary shard to be active. *

* API name: {@code wait_for_active_shards} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/BulkOperationBase.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/BulkOperationBase.java index 2de061b70..3dc24bfd4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/BulkOperationBase.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/BulkOperationBase.java @@ -105,7 +105,7 @@ public final String id() { } /** - * Name of the index or index alias to perform the action on. + * The name of the index or index alias to perform the action on. *

* API name: {@code _index} */ @@ -115,7 +115,7 @@ public final String index() { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ @@ -244,7 +244,7 @@ public final BuilderT id(@Nullable String value) { } /** - * Name of the index or index alias to perform the action on. + * The name of the index or index alias to perform the action on. *

* API name: {@code _index} */ @@ -254,7 +254,7 @@ public final BuilderT index(@Nullable String value) { } /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. *

* API name: {@code routing} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/BulkResponseItem.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/BulkResponseItem.java index ee9343ad8..1252b4045 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/BulkResponseItem.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/BulkResponseItem.java @@ -143,9 +143,9 @@ public final String id() { } /** - * Required - Name of the index associated with the operation. If the operation - * targeted a data stream, this is the backing index into which the document was - * written. + * Required - The name of the index associated with the operation. If the + * operation targeted a data stream, this is the backing index into which the + * document was written. *

* API name: {@code _index} */ @@ -154,7 +154,7 @@ public final String index() { } /** - * Required - HTTP status code returned for the operation. + * Required - The HTTP status code returned for the operation. *

* API name: {@code status} */ @@ -163,8 +163,8 @@ public final int status() { } /** - * Contains additional information about the failed operation. The parameter is - * only returned for failed operations. + * Additional information about the failed operation. The property is returned + * only for failed operations. *

* API name: {@code error} */ @@ -174,7 +174,8 @@ public final ErrorCause error() { } /** - * The primary term assigned to the document for the operation. + * The primary term assigned to the document for the operation. This property is + * returned only for successful operations. *

* API name: {@code _primary_term} */ @@ -184,7 +185,7 @@ public final Long primaryTerm() { } /** - * Result of the operation. Successful values are created, + * The result of the operation. Successful values are created, * deleted, and updated. *

* API name: {@code result} @@ -196,7 +197,7 @@ public final String result() { /** * The sequence number assigned to the document for the operation. Sequence - * numbers are used to ensure an older version of a document doesn’t overwrite a + * numbers are used to ensure an older version of a document doesn't overwrite a * newer version. *

* API name: {@code _seq_no} @@ -207,7 +208,7 @@ public final Long seqNo() { } /** - * Contains shard information for the operation. + * Shard information for the operation. *

* API name: {@code _shards} */ @@ -218,7 +219,8 @@ public final ShardStatistics shards() { /** * The document version associated with the operation. The document version is - * incremented each time the document is updated. + * incremented each time the document is updated. This property is returned only + * for successful actions. *

* API name: {@code _version} */ @@ -375,9 +377,9 @@ public final Builder id(@Nullable String value) { } /** - * Required - Name of the index associated with the operation. If the operation - * targeted a data stream, this is the backing index into which the document was - * written. + * Required - The name of the index associated with the operation. If the + * operation targeted a data stream, this is the backing index into which the + * document was written. *

* API name: {@code _index} */ @@ -387,7 +389,7 @@ public final Builder index(String value) { } /** - * Required - HTTP status code returned for the operation. + * Required - The HTTP status code returned for the operation. *

* API name: {@code status} */ @@ -397,8 +399,8 @@ public final Builder status(int value) { } /** - * Contains additional information about the failed operation. The parameter is - * only returned for failed operations. + * Additional information about the failed operation. The property is returned + * only for failed operations. *

* API name: {@code error} */ @@ -408,8 +410,8 @@ public final Builder error(@Nullable ErrorCause value) { } /** - * Contains additional information about the failed operation. The parameter is - * only returned for failed operations. + * Additional information about the failed operation. The property is returned + * only for failed operations. *

* API name: {@code error} */ @@ -418,7 +420,8 @@ public final Builder error(Function * API name: {@code _primary_term} */ @@ -428,7 +431,7 @@ public final Builder primaryTerm(@Nullable Long value) { } /** - * Result of the operation. Successful values are created, + * The result of the operation. Successful values are created, * deleted, and updated. *

* API name: {@code result} @@ -440,7 +443,7 @@ public final Builder result(@Nullable String value) { /** * The sequence number assigned to the document for the operation. Sequence - * numbers are used to ensure an older version of a document doesn’t overwrite a + * numbers are used to ensure an older version of a document doesn't overwrite a * newer version. *

* API name: {@code _seq_no} @@ -451,7 +454,7 @@ public final Builder seqNo(@Nullable Long value) { } /** - * Contains shard information for the operation. + * Shard information for the operation. *

* API name: {@code _shards} */ @@ -461,7 +464,7 @@ public final Builder shards(@Nullable ShardStatistics value) { } /** - * Contains shard information for the operation. + * Shard information for the operation. *

* API name: {@code _shards} */ @@ -471,7 +474,8 @@ public final Builder shards(Function * API name: {@code _version} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/UpdateAction.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/UpdateAction.java index e2b71156d..3d8d1cf35 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/UpdateAction.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/UpdateAction.java @@ -112,8 +112,8 @@ public static UpdateActionresult in the response is set to 'noop' when no + * changes to the document occur. *

* API name: {@code detect_noop} */ @@ -133,7 +133,8 @@ public final TPartialDocument doc() { } /** - * Set to true to use the contents of 'doc' as the value of 'upsert' + * Set to true to use the contents of doc as the value + * of upsert. *

* API name: {@code doc_as_upsert} */ @@ -143,7 +144,7 @@ public final Boolean docAsUpsert() { } /** - * Script to execute to update the document. + * The script to run to update the document. *

* API name: {@code script} */ @@ -153,7 +154,8 @@ public final Script script() { } /** - * Set to true to execute the script whether or not the document exists. + * Set to true to run the script whether or not the document + * exists. *

* API name: {@code scripted_upsert} */ @@ -163,7 +165,7 @@ public final Boolean scriptedUpsert() { } /** - * Set to false to disable source retrieval. You can also specify a + * If false, source retrieval is turned off. You can also specify a * comma-separated list of the fields you want to retrieve. *

* API name: {@code _source} @@ -174,8 +176,9 @@ public final SourceConfig source() { } /** - * If the document does not already exist, the contents of 'upsert' are inserted - * as a new document. If the document exists, the 'script' is executed. + * If the document does not already exist, the contents of upsert + * are inserted as a new document. If the document exists, the + * script is run. *

* API name: {@code upsert} */ @@ -277,8 +280,8 @@ public static class Builder private JsonpSerializer tPartialDocumentSerializer; /** - * Set to false to disable setting 'result' in the response to 'noop' if no - * change to the document occurred. + * If true, the result in the response is set to 'noop' when no + * changes to the document occur. *

* API name: {@code detect_noop} */ @@ -298,7 +301,8 @@ public final Builder doc(@Nullable TPartialDocument } /** - * Set to true to use the contents of 'doc' as the value of 'upsert' + * Set to true to use the contents of doc as the value + * of upsert. *

* API name: {@code doc_as_upsert} */ @@ -308,7 +312,7 @@ public final Builder docAsUpsert(@Nullable Boolean } /** - * Script to execute to update the document. + * The script to run to update the document. *

* API name: {@code script} */ @@ -318,7 +322,7 @@ public final Builder script(@Nullable Script value) } /** - * Script to execute to update the document. + * The script to run to update the document. *

* API name: {@code script} */ @@ -327,7 +331,8 @@ public final Builder script(Functiontrue to run the script whether or not the document + * exists. *

* API name: {@code scripted_upsert} */ @@ -337,7 +342,7 @@ public final Builder scriptedUpsert(@Nullable Boole } /** - * Set to false to disable source retrieval. You can also specify a + * If false, source retrieval is turned off. You can also specify a * comma-separated list of the fields you want to retrieve. *

* API name: {@code _source} @@ -348,7 +353,7 @@ public final Builder source(@Nullable SourceConfig } /** - * Set to false to disable source retrieval. You can also specify a + * If false, source retrieval is turned off. You can also specify a * comma-separated list of the fields you want to retrieve. *

* API name: {@code _source} @@ -359,8 +364,9 @@ public final Builder source( } /** - * If the document does not already exist, the contents of 'upsert' are inserted - * as a new document. If the document exists, the 'script' is executed. + * If the document does not already exist, the contents of upsert + * are inserted as a new document. If the document exists, the + * script is run. *

* API name: {@code upsert} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/UpdateOperation.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/UpdateOperation.java index bd060c89d..64cc3bd33 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/UpdateOperation.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/UpdateOperation.java @@ -137,7 +137,7 @@ public Iterator _serializables() { } /** - * If true, the request’s actions must target an index alias. + * If true, the request's actions must target an index alias. *

* API name: {@code require_alias} */ @@ -147,6 +147,9 @@ public final Boolean requireAlias() { } /** + * The number of times an update should be retried in the case of a version + * conflict. + *

* API name: {@code retry_on_conflict} */ @Nullable @@ -233,7 +236,7 @@ public final Builder binaryAction(@Nullable BinaryD private JsonpSerializer tPartialDocumentSerializer; /** - * If true, the request’s actions must target an index alias. + * If true, the request's actions must target an index alias. *

* API name: {@code require_alias} */ @@ -243,6 +246,9 @@ public final Builder requireAlias(@Nullable Boolean } /** + * The number of times an update should be retried in the case of a version + * conflict. + *

* API name: {@code retry_on_conflict} */ public final Builder retryOnConflict(@Nullable Integer value) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/WriteOperation.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/WriteOperation.java index 776d62830..ca9f3904b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/WriteOperation.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/bulk/WriteOperation.java @@ -78,10 +78,10 @@ protected WriteOperation(AbstractBuilder builder) { } /** - * A map from the full name of fields to the name of dynamic templates. Defaults - * to an empty map. If a name matches a dynamic template, then that template + * A map from the full name of fields to the name of dynamic templates. It + * defaults to an empty map. If a name matches a dynamic template, that template * will be applied regardless of other match predicates defined in the template. - * If a field is already defined in the mapping, then this parameter won’t be + * If a field is already defined in the mapping, then this parameter won't be * used. *

* API name: {@code dynamic_templates} @@ -91,10 +91,10 @@ public final Map dynamicTemplates() { } /** - * ID of the pipeline to use to preprocess incoming documents. If the index has - * a default ingest pipeline specified, then setting the value to - * _none disables the default ingest pipeline for this request. If - * a final pipeline is configured it will always run, regardless of the value of + * The ID of the pipeline to use to preprocess incoming documents. If the index + * has a default ingest pipeline specified, setting the value to + * _none turns off the default ingest pipeline for this request. If + * a final pipeline is configured, it will always run regardless of the value of * this parameter. *

* API name: {@code pipeline} @@ -105,7 +105,7 @@ public final String pipeline() { } /** - * If true, the request’s actions must target an index alias. + * If true, the request's actions must target an index alias. *

* API name: {@code require_alias} */ @@ -154,10 +154,10 @@ public abstract static class AbstractBuilder * API name: {@code dynamic_templates} @@ -170,10 +170,10 @@ public final BuilderT dynamicTemplates(Map map) { } /** - * A map from the full name of fields to the name of dynamic templates. Defaults - * to an empty map. If a name matches a dynamic template, then that template + * A map from the full name of fields to the name of dynamic templates. It + * defaults to an empty map. If a name matches a dynamic template, that template * will be applied regardless of other match predicates defined in the template. - * If a field is already defined in the mapping, then this parameter won’t be + * If a field is already defined in the mapping, then this parameter won't be * used. *

* API name: {@code dynamic_templates} @@ -186,10 +186,10 @@ public final BuilderT dynamicTemplates(String key, String value) { } /** - * ID of the pipeline to use to preprocess incoming documents. If the index has - * a default ingest pipeline specified, then setting the value to - * _none disables the default ingest pipeline for this request. If - * a final pipeline is configured it will always run, regardless of the value of + * The ID of the pipeline to use to preprocess incoming documents. If the index + * has a default ingest pipeline specified, setting the value to + * _none turns off the default ingest pipeline for this request. If + * a final pipeline is configured, it will always run regardless of the value of * this parameter. *

* API name: {@code pipeline} @@ -200,7 +200,7 @@ public final BuilderT pipeline(@Nullable String value) { } /** - * If true, the request’s actions must target an index alias. + * If true, the request's actions must target an index alias. *

* API name: {@code require_alias} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/get/GetResult.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/get/GetResult.java index 222467ece..a0cb8169f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/get/GetResult.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/get/GetResult.java @@ -119,13 +119,19 @@ public static GetResult getResultOf( } /** - * Required - API name: {@code _index} + * Required - The name of the index the document belongs to. + *

+ * API name: {@code _index} */ public final String index() { return this.index; } /** + * If the stored_fields parameter is set to true and + * found is true, it contains the document fields + * stored in the index. + *

* API name: {@code fields} */ public final Map fields() { @@ -140,20 +146,26 @@ public final List ignored() { } /** - * Required - API name: {@code found} + * Required - Indicates whether the document exists. + *

+ * API name: {@code found} */ public final boolean found() { return this.found; } /** - * Required - API name: {@code _id} + * Required - The unique identifier for the document. + *

+ * API name: {@code _id} */ public final String id() { return this.id; } /** + * The primary term assigned to the document for the indexing operation. + *

* API name: {@code _primary_term} */ @Nullable @@ -162,6 +174,8 @@ public final Long primaryTerm() { } /** + * The explicit routing, if set. + *

* API name: {@code _routing} */ @Nullable @@ -170,6 +184,10 @@ public final String routing() { } /** + * The sequence number assigned to the document for the indexing operation. + * Sequence numbers are used to ensure an older version of a document doesn't + * overwrite a newer version. + *

* API name: {@code _seq_no} */ @Nullable @@ -178,6 +196,11 @@ public final Long seqNo() { } /** + * If found is true, it contains the document data + * formatted in JSON. If the _source parameter is set to + * false or the stored_fields parameter is set to + * true, it is excluded. + *

* API name: {@code _source} */ @Nullable @@ -186,6 +209,8 @@ public final TDocument source() { } /** + * The document version, which is incremented each time the document is updated. + *

* API name: {@code _version} */ @Nullable @@ -328,7 +353,9 @@ public abstract static class AbstractBuilder tDocumentSerializer; /** - * Required - API name: {@code _index} + * Required - The name of the index the document belongs to. + *

+ * API name: {@code _index} */ public final BuilderT index(String value) { this.index = value; @@ -336,6 +363,10 @@ public final BuilderT index(String value) { } /** + * If the stored_fields parameter is set to true and + * found is true, it contains the document fields + * stored in the index. + *

* API name: {@code fields} *

* Adds all entries of map to fields. @@ -346,6 +377,10 @@ public final BuilderT fields(Map map) { } /** + * If the stored_fields parameter is set to true and + * found is true, it contains the document fields + * stored in the index. + *

* API name: {@code fields} *

* Adds an entry to fields. @@ -376,7 +411,9 @@ public final BuilderT ignored(String value, String... values) { } /** - * Required - API name: {@code found} + * Required - Indicates whether the document exists. + *

+ * API name: {@code found} */ public final BuilderT found(boolean value) { this.found = value; @@ -384,7 +421,9 @@ public final BuilderT found(boolean value) { } /** - * Required - API name: {@code _id} + * Required - The unique identifier for the document. + *

+ * API name: {@code _id} */ public final BuilderT id(String value) { this.id = value; @@ -392,6 +431,8 @@ public final BuilderT id(String value) { } /** + * The primary term assigned to the document for the indexing operation. + *

* API name: {@code _primary_term} */ public final BuilderT primaryTerm(@Nullable Long value) { @@ -400,6 +441,8 @@ public final BuilderT primaryTerm(@Nullable Long value) { } /** + * The explicit routing, if set. + *

* API name: {@code _routing} */ public final BuilderT routing(@Nullable String value) { @@ -408,6 +451,10 @@ public final BuilderT routing(@Nullable String value) { } /** + * The sequence number assigned to the document for the indexing operation. + * Sequence numbers are used to ensure an older version of a document doesn't + * overwrite a newer version. + *

* API name: {@code _seq_no} */ public final BuilderT seqNo(@Nullable Long value) { @@ -416,6 +463,11 @@ public final BuilderT seqNo(@Nullable Long value) { } /** + * If found is true, it contains the document data + * formatted in JSON. If the _source parameter is set to + * false or the stored_fields parameter is set to + * true, it is excluded. + *

* API name: {@code _source} */ public final BuilderT source(@Nullable TDocument value) { @@ -424,6 +476,8 @@ public final BuilderT source(@Nullable TDocument value) { } /** + * The document version, which is incremented each time the document is updated. + *

* API name: {@code _version} */ public final BuilderT version(@Nullable Long value) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/msearch_template/TemplateConfig.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/msearch_template/TemplateConfig.java index 10d395201..12027d6a3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/msearch_template/TemplateConfig.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/msearch_template/TemplateConfig.java @@ -105,8 +105,8 @@ public final Boolean explain() { } /** - * ID of the search template to use. If no source is specified, this parameter - * is required. + * The ID of the search template to use. If no source is specified, + * this parameter is required. *

* API name: {@code id} */ @@ -137,8 +137,8 @@ public final Boolean profile() { /** * An inline search template. Supports the same parameters as the search API's - * request body. Also supports Mustache variables. If no id is specified, this - * parameter is required. + * request body. It also supports Mustache variables. If no id is + * specified, this parameter is required. *

* API name: {@code source} */ @@ -231,8 +231,8 @@ public final Builder explain(@Nullable Boolean value) { } /** - * ID of the search template to use. If no source is specified, this parameter - * is required. + * The ID of the search template to use. If no source is specified, + * this parameter is required. *

* API name: {@code id} */ @@ -279,8 +279,8 @@ public final Builder profile(@Nullable Boolean value) { /** * An inline search template. Supports the same parameters as the search API's - * request body. Also supports Mustache variables. If no id is specified, this - * parameter is required. + * request body. It also supports Mustache variables. If no id is + * specified, this parameter is required. *

* API name: {@code source} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricDiscountedCumulativeGain.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricDiscountedCumulativeGain.java index 8d0c66842..4b084ef89 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricDiscountedCumulativeGain.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricDiscountedCumulativeGain.java @@ -52,7 +52,7 @@ * Discounted cumulative gain (DCG) * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html#_discounted_cumulative_gain_dcg">Documentation * on elastic.co * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricExpectedReciprocalRank.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricExpectedReciprocalRank.java index e915ebed3..97fa3cab3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricExpectedReciprocalRank.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricExpectedReciprocalRank.java @@ -52,7 +52,7 @@ * Expected Reciprocal Rank (ERR) * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html#_expected_reciprocal_rank_err">Documentation * on elastic.co * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricMeanReciprocalRank.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricMeanReciprocalRank.java index e394b4fd8..238c24ec7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricMeanReciprocalRank.java +++ 
b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricMeanReciprocalRank.java @@ -49,7 +49,7 @@ * Mean Reciprocal Rank * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html#_mean_reciprocal_rank">Documentation * on elastic.co * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricPrecision.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricPrecision.java index 8be6fed75..565b62d14 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricPrecision.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricPrecision.java @@ -52,7 +52,7 @@ * Precision at K (P@k) * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html#k-precision">Documentation * on elastic.co * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricRecall.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricRecall.java index 35c982d78..3d22c39c9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricRecall.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/rank_eval/RankEvalMetricRecall.java @@ -49,7 +49,7 @@ * Recall at K (R@k) * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html#k-recall">Documentation * on elastic.co * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/reindex/Destination.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/reindex/Destination.java index c3396d1b5..528095e1a 100644 --- 
a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/reindex/Destination.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/reindex/Destination.java @@ -102,8 +102,10 @@ public final String index() { } /** - * Set to create to only index documents that do not already exist. - * Important: To reindex to a data stream destination, this argument must be + * If it is create, the operation will only index documents that do + * not already exist (also known as "put if absent"). + *

+ * IMPORTANT: To reindex to a data stream destination, this argument must be * create. *

* API name: {@code op_type} @@ -124,9 +126,13 @@ public final String pipeline() { } /** - * By default, a document's routing is preserved unless it’s changed by the - * script. Set to discard to set routing to null, or - * =value to route using the specified value. + * By default, a document's routing is preserved unless it's changed by the + * script. If it is keep, the routing on the bulk request sent for + * each match is set to the routing on the match. If it is discard, + * the routing on the bulk request sent for each match is set to + * null. If it is =value, the routing on the bulk + * request sent for each match is set to all value specified after the equals + * sign (=). *

* API name: {@code routing} */ @@ -218,8 +224,10 @@ public final Builder index(String value) { } /** - * Set to create to only index documents that do not already exist. - * Important: To reindex to a data stream destination, this argument must be + * If it is create, the operation will only index documents that do + * not already exist (also known as "put if absent"). + *

+ * IMPORTANT: To reindex to a data stream destination, this argument must be * create. *

* API name: {@code op_type} @@ -240,9 +248,13 @@ public final Builder pipeline(@Nullable String value) { } /** - * By default, a document's routing is preserved unless it’s changed by the - * script. Set to discard to set routing to null, or - * =value to route using the specified value. + * By default, a document's routing is preserved unless it's changed by the + * script. If it is keep, the routing on the bulk request sent for + * each match is set to the routing on the match. If it is discard, + * the routing on the bulk request sent for each match is set to + * null. If it is =value, the routing on the bulk + * request sent for each match is set to all value specified after the equals + * sign (=). *

* API name: {@code routing} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/reindex/RemoteSource.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/reindex/RemoteSource.java index d58d708f1..5d0bbae7f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/reindex/RemoteSource.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/reindex/RemoteSource.java @@ -95,7 +95,7 @@ public static RemoteSource of(Function> fn) } /** - * The remote connection timeout. Defaults to 30 seconds. + * The remote connection timeout. *

* API name: {@code connect_timeout} */ @@ -115,7 +115,7 @@ public final Map headers() { /** * Required - The URL for the remote instance of Elasticsearch that you want to - * index from. + * index from. This information is required when you're indexing from remote. *

* API name: {@code host} */ @@ -144,7 +144,7 @@ public final String password() { } /** - * The remote socket read timeout. Defaults to 30 seconds. + * The remote socket read timeout. *

* API name: {@code socket_timeout} */ @@ -231,7 +231,7 @@ public static class Builder extends WithJsonObjectBuilderBase implement private Time socketTimeout; /** - * The remote connection timeout. Defaults to 30 seconds. + * The remote connection timeout. *

* API name: {@code connect_timeout} */ @@ -241,7 +241,7 @@ public final Builder connectTimeout(@Nullable Time value) { } /** - * The remote connection timeout. Defaults to 30 seconds. + * The remote connection timeout. *

* API name: {@code connect_timeout} */ @@ -275,7 +275,7 @@ public final Builder headers(String key, String value) { /** * Required - The URL for the remote instance of Elasticsearch that you want to - * index from. + * index from. This information is required when you're indexing from remote. *

* API name: {@code host} */ @@ -305,7 +305,7 @@ public final Builder password(@Nullable String value) { } /** - * The remote socket read timeout. Defaults to 30 seconds. + * The remote socket read timeout. *

* API name: {@code socket_timeout} */ @@ -315,7 +315,7 @@ public final Builder socketTimeout(@Nullable Time value) { } /** - * The remote socket read timeout. Defaults to 30 seconds. + * The remote socket read timeout. *

* API name: {@code socket_timeout} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/reindex/Source.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/reindex/Source.java index 99e8c3ab4..1ef94fe52 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/reindex/Source.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/reindex/Source.java @@ -107,7 +107,7 @@ public static Source of(Function> fn) { /** * Required - The name of the data stream, index, or alias you are copying from. - * Accepts a comma-separated list to reindex from multiple sources. + * It accepts a comma-separated list to reindex from multiple sources. *

* API name: {@code index} */ @@ -116,7 +116,7 @@ public final List index() { } /** - * Specifies the documents to reindex using the Query DSL. + * The documents to reindex, which is defined with Query DSL. *

* API name: {@code query} */ @@ -136,9 +136,9 @@ public final RemoteSource remote() { } /** - * The number of documents to index per batch. Use when indexing from remote to - * ensure that the batches fit within the on-heap buffer, which defaults to a - * maximum size of 100 MB. + * The number of documents to index per batch. Use it when you are indexing from + * remote to ensure that the batches fit within the on-heap buffer, which + * defaults to a maximum size of 100 MB. *

* API name: {@code size} */ @@ -159,14 +159,27 @@ public final SlicedScroll slice() { } /** + * A comma-separated list of <field>:<direction> pairs + * to sort by before indexing. Use it in conjunction with max_docs + * to control what documents are reindexed. + *

+ * WARNING: Sort in reindex is deprecated. Sorting in reindex was never + * guaranteed to index documents in order and prevents further development of + * reindex such as resilience and performance improvements. If used in + * combination with max_docs, consider using a query filter + * instead. + *

* API name: {@code sort} + * + * @deprecated 7.6.0 */ + @Deprecated public final List sort() { return this.sort; } /** - * If true reindexes all source fields. Set to a list to reindex + * If true, reindex all source fields. Set it to a list to reindex * select fields. *

* API name: {@code _source} @@ -294,7 +307,7 @@ public static class Builder extends WithJsonObjectBuilderBase implement /** * Required - The name of the data stream, index, or alias you are copying from. - * Accepts a comma-separated list to reindex from multiple sources. + * It accepts a comma-separated list to reindex from multiple sources. *

* API name: {@code index} *

@@ -307,7 +320,7 @@ public final Builder index(List list) { /** * Required - The name of the data stream, index, or alias you are copying from. - * Accepts a comma-separated list to reindex from multiple sources. + * It accepts a comma-separated list to reindex from multiple sources. *

* API name: {@code index} *

@@ -319,7 +332,7 @@ public final Builder index(String value, String... values) { } /** - * Specifies the documents to reindex using the Query DSL. + * The documents to reindex, which is defined with Query DSL. *

* API name: {@code query} */ @@ -329,7 +342,7 @@ public final Builder query(@Nullable Query value) { } /** - * Specifies the documents to reindex using the Query DSL. + * The documents to reindex, which is defined with Query DSL. *

* API name: {@code query} */ @@ -357,9 +370,9 @@ public final Builder remote(Function * API name: {@code size} */ @@ -390,36 +403,75 @@ public final Builder slice(Function<field>:<direction> pairs + * to sort by before indexing. Use it in conjunction with max_docs + * to control what documents are reindexed. + *

+ * WARNING: Sort in reindex is deprecated. Sorting in reindex was never + * guaranteed to index documents in order and prevents further development of + * reindex such as resilience and performance improvements. If used in + * combination with max_docs, consider using a query filter + * instead. + *

* API name: {@code sort} *

* Adds all elements of list to sort. + * + * @deprecated 7.6.0 */ + @Deprecated public final Builder sort(List list) { this.sort = _listAddAll(this.sort, list); return this; } /** + * A comma-separated list of <field>:<direction> pairs + * to sort by before indexing. Use it in conjunction with max_docs + * to control what documents are reindexed. + *

+ * WARNING: Sort in reindex is deprecated. Sorting in reindex was never + * guaranteed to index documents in order and prevents further development of + * reindex such as resilience and performance improvements. If used in + * combination with max_docs, consider using a query filter + * instead. + *

* API name: {@code sort} *

* Adds one or more values to sort. + * + * @deprecated 7.6.0 */ + @Deprecated public final Builder sort(SortOptions value, SortOptions... values) { this.sort = _listAdd(this.sort, value, values); return this; } /** + * A comma-separated list of <field>:<direction> pairs + * to sort by before indexing. Use it in conjunction with max_docs + * to control what documents are reindexed. + *

+ * WARNING: Sort in reindex is deprecated. Sorting in reindex was never + * guaranteed to index documents in order and prevents further development of + * reindex such as resilience and performance improvements. If used in + * combination with max_docs, consider using a query filter + * instead. + *

* API name: {@code sort} *

* Adds a value to sort using a builder lambda. + * + * @deprecated 7.6.0 */ + @Deprecated public final Builder sort(Function> fn) { return sort(fn.apply(new SortOptions.Builder()).build()); } /** - * If true reindexes all source fields. Set to a list to reindex + * If true, reindex all source fields. Set it to a list to reindex * select fields. *

* API name: {@code _source} @@ -432,7 +484,7 @@ public final Builder sourceFields(List list) { } /** - * If true reindexes all source fields. Set to a list to reindex + * If true, reindex all source fields. Set it to a list to reindex * select fields. *

* API name: {@code _source} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/scripts_painless_execute/PainlessContext.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/scripts_painless_execute/PainlessContext.java new file mode 100644 index 000000000..f44488561 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/scripts_painless_execute/PainlessContext.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.core.scripts_painless_execute; + +import co.elastic.clients.json.JsonEnum; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. 
+// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public enum PainlessContext implements JsonEnum { + /** + * The default context if no other context is specified. + */ + PainlessTest("painless_test"), + + /** + * Treats scripts as if they were run inside a script query. + */ + Filter("filter"), + + /** + * Treats scripts as if they were run inside a script_score + * function in a function_score query. + */ + Score("score"), + + /** + * The context for boolean fields. The script returns a true or + * false response. + */ + BooleanField("boolean_field"), + + /** + * The context for date fields. emit takes a long value and the + * script returns a sorted list of dates. + */ + DateField("date_field"), + + /** + * The context for double numeric fields. The script returns a sorted list of + * double values. + */ + DoubleField("double_field"), + + /** + * The context for geo-point fields. emit takes two double + * parameters, the latitude and longitude values, and the script returns an + * object in GeoJSON format containing the coordinates for the geo point. + */ + GeoPointField("geo_point_field"), + + /** + * The context for ip fields. The script returns a sorted list of + * IP addresses. + */ + IpField("ip_field"), + + /** + * The context for keyword fields. The script returns a sorted list of string + * values. + */ + KeywordField("keyword_field"), + + /** + * The context for long numeric fields. The script returns a sorted list of long + * values. + */ + LongField("long_field"), + + /** + * The context for composite runtime fields. The script returns a map of values. 
+ */ + CompositeField("composite_field"), + + ; + + private final String jsonValue; + + PainlessContext(String jsonValue) { + this.jsonValue = jsonValue; + } + + public String jsonValue() { + return this.jsonValue; + } + + public static final JsonEnum.Deserializer _DESERIALIZER = new JsonEnum.Deserializer<>( + PainlessContext.values()); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/scripts_painless_execute/PainlessContextSetup.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/scripts_painless_execute/PainlessContextSetup.java index f97e35159..7cb1b3c4c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/scripts_painless_execute/PainlessContextSetup.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/scripts_painless_execute/PainlessContextSetup.java @@ -84,7 +84,7 @@ public static PainlessContextSetup of(Function * API name: {@code document} @@ -94,9 +94,18 @@ public final JsonData document() { } /** - * Required - Index containing a mapping that’s compatible with the indexed + * Required - Index containing a mapping that's compatible with the indexed * document. You may specify a remote index by prefixing the index with the - * remote cluster alias. + * remote cluster alias. For example, remote1:my_index indicates + * that you want to run the painless script against the "my_index" + * index on the "remote1" cluster. This request will be forwarded to + * the "remote1" cluster if you have configured a connection to that + * remote cluster. + *

+ * NOTE: Wildcards are not accepted in the index expression for this endpoint. + * The expression *:myindex will return the error "No such + * remote cluster" and the expression logs* or + * remote1:logs* will return the error "index not found". *

* API name: {@code index} */ @@ -161,7 +170,7 @@ public static class Builder extends WithJsonObjectBuilderBase private Query query; /** - * Required - Document that’s temporarily indexed in-memory and accessible from + * Required - Document that's temporarily indexed in-memory and accessible from * the script. *

* API name: {@code document} @@ -172,9 +181,18 @@ public final Builder document(JsonData value) { } /** - * Required - Index containing a mapping that’s compatible with the indexed + * Required - Index containing a mapping that's compatible with the indexed * document. You may specify a remote index by prefixing the index with the - * remote cluster alias. + * remote cluster alias. For example, remote1:my_index indicates + * that you want to run the painless script against the "my_index" + * index on the "remote1" cluster. This request will be forwarded to + * the "remote1" cluster if you have configured a connection to that + * remote cluster. + *

+ * NOTE: Wildcards are not accepted in the index expression for this endpoint. + * The expression *:myindex will return the error "No such + * remote cluster" and the expression logs* or + * remote1:logs* will return the error "index not found". *

* API name: {@code index} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/Context.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/Context.java index 51eaf8608..f99e4191f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/Context.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/Context.java @@ -62,7 +62,7 @@ * document's field for the text. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html#_document_input_parameters">Documentation * on elastic.co * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/ResponseBody.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/ResponseBody.java index d2f9baea0..8a20a3629 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/ResponseBody.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/ResponseBody.java @@ -131,28 +131,53 @@ protected ResponseBody(AbstractBuilder builder) { } /** - * Required - API name: {@code took} + * Required - The number of milliseconds it took Elasticsearch to run the + * request. This value is calculated by measuring the time elapsed between + * receipt of a request on the coordinating node and the time at which the + * coordinating node is ready to send the response. It includes: + *

+ *

+ * It does not include: + *

    + *
  • Time needed to send the request to Elasticsearch
  • + *
  • Time needed to serialize the JSON response
  • + *
  • Time needed to send the response to a client
  • + *
+ *

+ * API name: {@code took} */ public final long took() { return this.took; } /** - * Required - API name: {@code timed_out} + * Required - If true, the request timed out before completion; + * returned results may be partial or empty. + *

+ * API name: {@code timed_out} */ public final boolean timedOut() { return this.timedOut; } /** - * Required - API name: {@code _shards} + * Required - A count of shards used for the request. + *

+ * API name: {@code _shards} */ public final ShardStatistics shards() { return this.shards; } /** - * Required - API name: {@code hits} + * Required - The returned documents and metadata. + *

+ * API name: {@code hits} */ public final HitsMetadata hits() { return this.hits; @@ -213,6 +238,11 @@ public final String pitId() { } /** + * The identifier for the search and its search context. You can use this scroll + * ID with the scroll API to retrieve the next batch of search results for the + * request. This property is returned only if the scroll query + * parameter is specified in the request. + *

* API name: {@code _scroll_id} */ @Nullable @@ -367,7 +397,25 @@ public abstract static class AbstractBuilder tDocumentSerializer; /** - * Required - API name: {@code took} + * Required - The number of milliseconds it took Elasticsearch to run the + * request. This value is calculated by measuring the time elapsed between + * receipt of a request on the coordinating node and the time at which the + * coordinating node is ready to send the response. It includes: + *

    + *
  • Communication time between the coordinating node and data nodes
  • + *
  • Time the request spends in the search thread pool, queued for + * execution
  • + *
  • Actual run time
  • + *
+ *

+ * It does not include: + *

    + *
  • Time needed to send the request to Elasticsearch
  • + *
  • Time needed to serialize the JSON response
  • + *
  • Time needed to send the response to a client
  • + *
+ *

+ * API name: {@code took} */ public final BuilderT took(long value) { this.took = value; @@ -375,7 +423,10 @@ public final BuilderT took(long value) { } /** - * Required - API name: {@code timed_out} + * Required - If true, the request timed out before completion; + * returned results may be partial or empty. + *

+ * API name: {@code timed_out} */ public final BuilderT timedOut(boolean value) { this.timedOut = value; @@ -383,7 +434,9 @@ public final BuilderT timedOut(boolean value) { } /** - * Required - API name: {@code _shards} + * Required - A count of shards used for the request. + *

+ * API name: {@code _shards} */ public final BuilderT shards(ShardStatistics value) { this.shards = value; @@ -391,14 +444,18 @@ public final BuilderT shards(ShardStatistics value) { } /** - * Required - API name: {@code _shards} + * Required - A count of shards used for the request. + *

+ * API name: {@code _shards} */ public final BuilderT shards(Function> fn) { return this.shards(fn.apply(new ShardStatistics.Builder()).build()); } /** - * Required - API name: {@code hits} + * Required - The returned documents and metadata. + *

+ * API name: {@code hits} */ public final BuilderT hits(HitsMetadata value) { this.hits = value; @@ -406,7 +463,9 @@ public final BuilderT hits(HitsMetadata value) { } /** - * Required - API name: {@code hits} + * Required - The returned documents and metadata. + *

+ * API name: {@code hits} */ public final BuilderT hits( Function, ObjectBuilder>> fn) { @@ -517,6 +576,11 @@ public final BuilderT pitId(@Nullable String value) { } /** + * The identifier for the search and its search context. You can use this scroll + * ID with the scroll API to retrieve the next batch of search results for the + * request. This property is returned only if the scroll query + * parameter is specified in the request. + *

* API name: {@code _scroll_id} */ public final BuilderT scrollId(@Nullable String value) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/termvectors/Filter.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/termvectors/Filter.java index 9a363d955..9651c8b6f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/termvectors/Filter.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/termvectors/Filter.java @@ -108,7 +108,7 @@ public final Integer maxDocFreq() { } /** - * Maximum number of terms that must be returned per field. + * The maximum number of terms that must be returned per field. *

* API name: {@code max_num_terms} */ @@ -118,7 +118,7 @@ public final Integer maxNumTerms() { } /** - * Ignore words with more than this frequency in the source doc. Defaults to + * Ignore words with more than this frequency in the source doc. It defaults to * unbounded. *

* API name: {@code max_term_freq} @@ -262,7 +262,7 @@ public final Builder maxDocFreq(@Nullable Integer value) { } /** - * Maximum number of terms that must be returned per field. + * The maximum number of terms that must be returned per field. *

* API name: {@code max_num_terms} */ @@ -272,7 +272,7 @@ public final Builder maxNumTerms(@Nullable Integer value) { } /** - * Ignore words with more than this frequency in the source doc. Defaults to + * Ignore words with more than this frequency in the source doc. It defaults to * unbounded. *

* API name: {@code max_term_freq} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesAsyncClient.java index 6e11918c6..709fae3d4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesAsyncClient.java @@ -77,7 +77,7 @@ public ElasticsearchDanglingIndicesAsyncClient withTransportOptions(@Nullable Tr * node is offline. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index">Documentation * on elastic.co */ @@ -99,7 +99,7 @@ public CompletableFuture deleteDanglingIndex(Delete * a function that initializes a builder to create the * {@link DeleteDanglingIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index">Documentation * on elastic.co */ @@ -120,7 +120,7 @@ public final CompletableFuture deleteDanglingIndex( * node is offline. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index">Documentation * on elastic.co */ @@ -144,7 +144,7 @@ public CompletableFuture importDanglingIndex(Import * a function that initializes a builder to create the * {@link ImportDanglingIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index">Documentation * on elastic.co */ @@ -167,7 +167,7 @@ public final CompletableFuture importDanglingIndex( * Use this API to list dangling indices, which you can then import or delete. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices">Documentation * on elastic.co */ public CompletableFuture listDanglingIndices() { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesClient.java index bada7b9b7..c3a161371 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesClient.java @@ -78,7 +78,7 @@ public ElasticsearchDanglingIndicesClient withTransportOptions(@Nullable Transpo * node is offline. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index">Documentation * on elastic.co */ @@ -101,7 +101,7 @@ public DeleteDanglingIndexResponse deleteDanglingIndex(DeleteDanglingIndexReques * a function that initializes a builder to create the * {@link DeleteDanglingIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index">Documentation * on elastic.co */ @@ -123,7 +123,7 @@ public final DeleteDanglingIndexResponse deleteDanglingIndex( * node is offline. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index">Documentation * on elastic.co */ @@ -148,7 +148,7 @@ public ImportDanglingIndexResponse importDanglingIndex(ImportDanglingIndexReques * a function that initializes a builder to create the * {@link ImportDanglingIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index">Documentation * on elastic.co */ @@ -172,7 +172,7 @@ public final ImportDanglingIndexResponse importDanglingIndex( * Use this API to list dangling indices, which you can then import or delete. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices">Documentation * on elastic.co */ public ListDanglingIndicesResponse listDanglingIndices() throws IOException, ElasticsearchException { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html b/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html index 79a2c1175..8b9a8cc75 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html @@ -4,57 +4,57 @@ Elasticsearch API specification - Please see the Elasticsearch API specification. + Please see the Elasticsearch API specification. 
diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/enrich/ElasticsearchEnrichAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/enrich/ElasticsearchEnrichAsyncClient.java index 56655e91b..051ff9dff 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/enrich/ElasticsearchEnrichAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/enrich/ElasticsearchEnrichAsyncClient.java @@ -72,7 +72,7 @@ public ElasticsearchEnrichAsyncClient withTransportOptions(@Nullable TransportOp * index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy">Documentation * on elastic.co */ @@ -91,7 +91,7 @@ public CompletableFuture deletePolicy(DeletePolicyRequest * a function that initializes a builder to create the * {@link DeletePolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy">Documentation * on elastic.co */ @@ -106,7 +106,7 @@ public final CompletableFuture deletePolicy( * Run an enrich policy. Create the enrich index for an existing enrich policy. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy">Documentation * on elastic.co */ @@ -124,7 +124,7 @@ public CompletableFuture executePolicy(ExecutePolicyReque * a function that initializes a builder to create the * {@link ExecutePolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy">Documentation * on elastic.co */ @@ -139,7 +139,7 @@ public final CompletableFuture executePolicy( * Get an enrich policy. Returns information about an enrich policy. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy">Documentation * on elastic.co */ @@ -157,7 +157,7 @@ public CompletableFuture getPolicy(GetPolicyRequest request) * a function that initializes a builder to create the * {@link GetPolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy">Documentation * on elastic.co */ @@ -170,7 +170,7 @@ public final CompletableFuture getPolicy( * Get an enrich policy. Returns information about an enrich policy. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy">Documentation * on elastic.co */ @@ -185,7 +185,7 @@ public CompletableFuture getPolicy() { * Create an enrich policy. Creates an enrich policy. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy">Documentation * on elastic.co */ @@ -203,7 +203,7 @@ public CompletableFuture putPolicy(PutPolicyRequest request) * a function that initializes a builder to create the * {@link PutPolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy">Documentation * on elastic.co */ @@ -219,7 +219,7 @@ public final CompletableFuture putPolicy( * enrich policies that are currently executing. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats">Documentation * on elastic.co */ @@ -238,7 +238,7 @@ public CompletableFuture stats(EnrichStatsRequest request) * a function that initializes a builder to create the * {@link EnrichStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats">Documentation * on elastic.co */ @@ -252,7 +252,7 @@ public final CompletableFuture stats( * enrich policies that are currently executing. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/enrich/ElasticsearchEnrichClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/enrich/ElasticsearchEnrichClient.java index 547cb9f0c..9de008bad 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/enrich/ElasticsearchEnrichClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/enrich/ElasticsearchEnrichClient.java @@ -72,7 +72,7 @@ public ElasticsearchEnrichClient withTransportOptions(@Nullable TransportOptions * index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy">Documentation * on elastic.co */ @@ -91,7 +91,7 @@ public DeletePolicyResponse deletePolicy(DeletePolicyRequest request) throws IOE * a function that initializes a builder to create the * {@link DeletePolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy">Documentation * on elastic.co */ @@ -107,7 +107,7 @@ public final DeletePolicyResponse deletePolicy( * Run an enrich policy. Create the enrich index for an existing enrich policy. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy">Documentation * on elastic.co */ @@ -126,7 +126,7 @@ public ExecutePolicyResponse executePolicy(ExecutePolicyRequest request) * a function that initializes a builder to create the * {@link ExecutePolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy">Documentation * on elastic.co */ @@ -142,7 +142,7 @@ public final ExecutePolicyResponse executePolicy( * Get an enrich policy. Returns information about an enrich policy. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy">Documentation * on elastic.co */ @@ -160,7 +160,7 @@ public GetPolicyResponse getPolicy(GetPolicyRequest request) throws IOException, * a function that initializes a builder to create the * {@link GetPolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy">Documentation * on elastic.co */ @@ -173,7 +173,7 @@ public final GetPolicyResponse getPolicy(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy">Documentation * on elastic.co */ @@ -188,7 +188,7 @@ public GetPolicyResponse getPolicy() throws IOException, ElasticsearchException * Create an enrich policy. Creates an enrich policy. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy">Documentation * on elastic.co */ @@ -206,7 +206,7 @@ public PutPolicyResponse putPolicy(PutPolicyRequest request) throws IOException, * a function that initializes a builder to create the * {@link PutPolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy">Documentation * on elastic.co */ @@ -222,7 +222,7 @@ public final PutPolicyResponse putPolicy(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats">Documentation * on elastic.co */ @@ -241,7 +241,7 @@ public EnrichStatsResponse stats(EnrichStatsRequest request) throws IOException, * a function that initializes a builder to create the * {@link EnrichStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats">Documentation * on elastic.co */ @@ -255,7 +255,7 @@ public final EnrichStatsResponse stats(FunctionDocumentation + * 
"https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/ElasticsearchEqlAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/ElasticsearchEqlAsyncClient.java index a6faa51d8..dc3083f89 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/ElasticsearchEqlAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/ElasticsearchEqlAsyncClient.java @@ -73,7 +73,7 @@ public ElasticsearchEqlAsyncClient withTransportOptions(@Nullable TransportOptio * synchronous EQL search. The API also deletes results for the search. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete">Documentation * on elastic.co */ @@ -92,7 +92,7 @@ public CompletableFuture delete(EqlDeleteRequest request) { * a function that initializes a builder to create the * {@link EqlDeleteRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete">Documentation * on elastic.co */ @@ -108,7 +108,7 @@ public final CompletableFuture delete( * for an async EQL search or a stored synchronous EQL search. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get">Documentation * on elastic.co */ @@ -129,7 +129,7 @@ public CompletableFuture> get(EqlGetRequest requ * a function that initializes a builder to create the * {@link EqlGetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get">Documentation * on elastic.co */ @@ -143,7 +143,7 @@ public final CompletableFuture> get( * for an async EQL search or a stored synchronous EQL search. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get">Documentation * on elastic.co */ @@ -164,7 +164,7 @@ public CompletableFuture> get(EqlGetRequest requ * a function that initializes a builder to create the * {@link EqlGetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get">Documentation * on elastic.co */ @@ -180,7 +180,7 @@ public final CompletableFuture> get( * stored synchronous EQL search without returning results. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status">Documentation * on elastic.co */ @@ -199,7 +199,7 @@ public CompletableFuture getStatus(GetEqlStatusRequest req * a function that initializes a builder to create the * {@link GetEqlStatusRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status">Documentation * on elastic.co */ @@ -216,7 +216,7 @@ public final CompletableFuture getStatus( * to an event. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search">Documentation * on elastic.co */ @@ -239,7 +239,7 @@ public CompletableFuture> search(EqlSearchReq * a function that initializes a builder to create the * {@link EqlSearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search">Documentation * on elastic.co */ @@ -254,7 +254,7 @@ public final CompletableFuture> search( * to an event. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search">Documentation * on elastic.co */ @@ -276,7 +276,7 @@ public CompletableFuture> search(EqlSearchReq * a function that initializes a builder to create the * {@link EqlSearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/ElasticsearchEqlClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/ElasticsearchEqlClient.java index 4c6285064..47fd90def 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/ElasticsearchEqlClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/ElasticsearchEqlClient.java @@ -74,7 +74,7 @@ public ElasticsearchEqlClient withTransportOptions(@Nullable TransportOptions tr * synchronous EQL search. The API also deletes results for the search. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete">Documentation * on elastic.co */ @@ -93,7 +93,7 @@ public EqlDeleteResponse delete(EqlDeleteRequest request) throws IOException, El * a function that initializes a builder to create the * {@link EqlDeleteRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete">Documentation * on elastic.co */ @@ -109,7 +109,7 @@ public final EqlDeleteResponse delete(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get">Documentation * on elastic.co */ @@ -131,7 +131,7 @@ public EqlGetResponse get(EqlGetRequest request, Class * a function that initializes a builder to create the * {@link EqlGetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get">Documentation * on elastic.co */ @@ -145,7 +145,7 @@ public final EqlGetResponse get(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get">Documentation * on elastic.co */ @@ -167,7 +167,7 @@ public EqlGetResponse get(EqlGetRequest request, Type tEventTyp * a function that initializes a builder to create the * {@link EqlGetRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get">Documentation * on elastic.co */ @@ -183,7 +183,7 @@ public final EqlGetResponse get(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status">Documentation * on elastic.co */ @@ -202,7 +202,7 @@ public GetEqlStatusResponse getStatus(GetEqlStatusRequest request) throws IOExce * a function that initializes a builder to create the * {@link GetEqlStatusRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status">Documentation * on elastic.co */ @@ -220,7 +220,7 @@ public 
final GetEqlStatusResponse getStatus( * to an event. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search">Documentation * on elastic.co */ @@ -243,7 +243,7 @@ public EqlSearchResponse search(EqlSearchRequest request, Class * a function that initializes a builder to create the * {@link EqlSearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search">Documentation * on elastic.co */ @@ -259,7 +259,7 @@ public final EqlSearchResponse search( * to an event. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search">Documentation * on elastic.co */ @@ -282,7 +282,7 @@ public EqlSearchResponse search(EqlSearchRequest request, Type * a function that initializes a builder to create the * {@link EqlSearchRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/EqlSearchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/EqlSearchRequest.java index 3ca8368a6..aad969b75 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/EqlSearchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/EqlSearchRequest.java @@ -175,6 +175,11 @@ public final Boolean allowNoIndices() { } /** + * Allow query execution also in case of shard failures. If true, the query will + * keep running and will return results based on the available shards. For + * sequences, the behavior can be further refined using + * allow_partial_sequence_results + *

* API name: {@code allow_partial_search_results} */ @Nullable @@ -183,6 +188,11 @@ public final Boolean allowPartialSearchResults() { } /** + * This flag applies only to sequences and has effect only if + * allow_partial_search_results=true. If true, the sequence query will return + * results based on the available shards, ignoring the others. If false, the + * sequence query will return successfully, but will always have empty results. + *

* API name: {@code allow_partial_sequence_results} */ @Nullable @@ -549,6 +559,11 @@ public final Builder allowNoIndices(@Nullable Boolean value) { } /** + * Allow query execution also in case of shard failures. If true, the query will + * keep running and will return results based on the available shards. For + * sequences, the behavior can be further refined using + * allow_partial_sequence_results + *

* API name: {@code allow_partial_search_results} */ public final Builder allowPartialSearchResults(@Nullable Boolean value) { @@ -557,6 +572,11 @@ public final Builder allowPartialSearchResults(@Nullable Boolean value) { } /** + * This flag applies only to sequences and has effect only if + * allow_partial_search_results=true. If true, the sequence query will return + * results based on the available shards, ignoring the others. If false, the + * sequence query will return successfully, but will always have empty results. + *

* API name: {@code allow_partial_sequence_results} */ public final Builder allowPartialSequenceResults(@Nullable Boolean value) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/AsyncQueryStopRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/AsyncQueryStopRequest.java new file mode 100644 index 000000000..c0be6f79f --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/AsyncQueryStopRequest.java @@ -0,0 +1,232 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.esql; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.BinaryEndpoint; +import co.elastic.clients.transport.endpoints.BinaryResponse; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.lang.String; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: esql.async_query_stop.Request + +/** + * Stop async ES|QL query. + *

+ * This API interrupts the query execution and returns the results so far. If + * the Elasticsearch security features are enabled, only the user who first + * submitted the ES|QL query can stop it. + * + * @see API + * specification + */ + +public class AsyncQueryStopRequest extends RequestBase { + @Nullable + private final Boolean dropNullColumns; + + private final String id; + + // --------------------------------------------------------------------------------------------- + + private AsyncQueryStopRequest(Builder builder) { + + this.dropNullColumns = builder.dropNullColumns; + this.id = ApiTypeHelper.requireNonNull(builder.id, this, "id"); + + } + + public static AsyncQueryStopRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Indicates whether columns that are entirely null will be removed + * from the columns and values portion of the results. + * If true, the response will include an extra section under the + * name all_columns which has the name of all the columns. + *

+ * API name: {@code drop_null_columns} + */ + @Nullable + public final Boolean dropNullColumns() { + return this.dropNullColumns; + } + + /** + * Required - The unique identifier of the query. A query ID is provided in the + * ES|QL async query API response for a query that does not complete in the + * designated time. A query ID is also provided when the request was submitted + * with the keep_on_completion parameter set to true. + *

+ * API name: {@code id} + */ + public final String id() { + return this.id; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link AsyncQueryStopRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + @Nullable + private Boolean dropNullColumns; + + private String id; + + /** + * Indicates whether columns that are entirely null will be removed + * from the columns and values portion of the results. + * If true, the response will include an extra section under the + * name all_columns which has the name of all the columns. + *

+ * API name: {@code drop_null_columns} + */ + public final Builder dropNullColumns(@Nullable Boolean value) { + this.dropNullColumns = value; + return this; + } + + /** + * Required - The unique identifier of the query. A query ID is provided in the + * ES|QL async query API response for a query that does not complete in the + * designated time. A query ID is also provided when the request was submitted + * with the keep_on_completion parameter set to true. + *

+ * API name: {@code id} + */ + public final Builder id(String value) { + this.id = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link AsyncQueryStopRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public AsyncQueryStopRequest build() { + _checkSingleUse(); + + return new AsyncQueryStopRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code esql.async_query_stop}". + */ + public static final Endpoint _ENDPOINT = new BinaryEndpoint<>( + "es/esql.async_query_stop", + + // Request method + request -> { + return "POST"; + + }, + + // Request path + request -> { + final int _id = 1 << 0; + + int propsSet = 0; + + propsSet |= _id; + + if (propsSet == (_id)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_query"); + buf.append("/async"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.id, buf); + buf.append("/stop"); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _id = 1 << 0; + + int propsSet = 0; + + propsSet |= _id; + + if (propsSet == (_id)) { + params.put("id", request.id); + } + return params; + }, + + // Request parameters + request -> { + Map params = new HashMap<>(); + if (request.dropNullColumns != null) { + params.put("drop_null_columns", String.valueOf(request.dropNullColumns)); + } + return params; + + }, SimpleEndpoint.emptyMap(), false, null); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/ElasticsearchEsqlAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/ElasticsearchEsqlAsyncClient.java index 2f7144e6f..2b5e5ca24 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/ElasticsearchEsqlAsyncClient.java +++ 
b/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/ElasticsearchEsqlAsyncClient.java @@ -67,6 +67,47 @@ public ElasticsearchEsqlAsyncClient withTransportOptions(@Nullable TransportOpti return new ElasticsearchEsqlAsyncClient(this.transport, transportOptions); } + // ----- Endpoint: esql.async_query_stop + + /** + * Stop async ES|QL query. + *

+ * This API interrupts the query execution and returns the results so far. If + * the Elasticsearch security features are enabled, only the user who first + * submitted the ES|QL query can stop it. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture asyncQueryStop(AsyncQueryStopRequest request) { + @SuppressWarnings("unchecked") + Endpoint endpoint = (Endpoint) AsyncQueryStopRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Stop async ES|QL query. + *

+ * This API interrupts the query execution and returns the results so far. If + * the Elasticsearch security features are enabled, only the user who first + * submitted the ES|QL query can stop it. + * + * @param fn + * a function that initializes a builder to create the + * {@link AsyncQueryStopRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture asyncQueryStop( + Function> fn) { + return asyncQueryStop(fn.apply(new AsyncQueryStopRequest.Builder()).build()); + } + // ----- Endpoint: esql.query /** @@ -74,7 +115,7 @@ public ElasticsearchEsqlAsyncClient withTransportOptions(@Nullable TransportOpti * language) query. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-rest.html">Documentation * on elastic.co */ @@ -93,7 +134,7 @@ public CompletableFuture query(QueryRequest request) { * a function that initializes a builder to create the * {@link QueryRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-rest.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/ElasticsearchEsqlClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/ElasticsearchEsqlClient.java index 24b56e29e..4152c36c8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/ElasticsearchEsqlClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/ElasticsearchEsqlClient.java @@ -68,6 +68,48 @@ public ElasticsearchEsqlClient withTransportOptions(@Nullable TransportOptions t return new ElasticsearchEsqlClient(this.transport, transportOptions); } + // ----- Endpoint: esql.async_query_stop + + /** + * Stop async ES|QL query. + *

+ * This API interrupts the query execution and returns the results so far. If + * the Elasticsearch security features are enabled, only the user who first + * submitted the ES|QL query can stop it. + * + * @see Documentation + * on elastic.co + */ + + public BinaryResponse asyncQueryStop(AsyncQueryStopRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + Endpoint endpoint = (Endpoint) AsyncQueryStopRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Stop async ES|QL query. + *

+ * This API interrupts the query execution and returns the results so far. If + * the Elasticsearch security features are enabled, only the user who first + * submitted the ES|QL query can stop it. + * + * @param fn + * a function that initializes a builder to create the + * {@link AsyncQueryStopRequest} + * @see Documentation + * on elastic.co + */ + + public final BinaryResponse asyncQueryStop( + Function> fn) + throws IOException, ElasticsearchException { + return asyncQueryStop(fn.apply(new AsyncQueryStopRequest.Builder()).build()); + } + // ----- Endpoint: esql.query /** @@ -75,7 +117,7 @@ public ElasticsearchEsqlClient withTransportOptions(@Nullable TransportOptions t * language) query. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-rest.html">Documentation * on elastic.co */ @@ -94,7 +136,7 @@ public BinaryResponse query(QueryRequest request) throws IOException, Elasticsea * a function that initializes a builder to create the * {@link QueryRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-rest.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/query/EsqlFormat.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/EsqlFormat.java similarity index 94% rename from java-client/src/main/java/co/elastic/clients/elasticsearch/esql/query/EsqlFormat.java rename to java-client/src/main/java/co/elastic/clients/elasticsearch/esql/EsqlFormat.java index f8870ad48..a61c17bd1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/query/EsqlFormat.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/EsqlFormat.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package co.elastic.clients.elasticsearch.esql.query; +package co.elastic.clients.elasticsearch.esql; import co.elastic.clients.json.JsonEnum; import co.elastic.clients.json.JsonpDeserializable; @@ -40,7 +40,7 @@ /** * - * @see API + * @see API * specification */ @JsonpDeserializable diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/QueryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/QueryRequest.java index 0d235bbc5..41bebd62f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/QueryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/esql/QueryRequest.java @@ -23,7 +23,6 @@ import co.elastic.clients.elasticsearch._types.FieldValue; import co.elastic.clients.elasticsearch._types.RequestBase; import co.elastic.clients.elasticsearch._types.query_dsl.Query; -import co.elastic.clients.elasticsearch.esql.query.EsqlFormat; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.JsonpMapper; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesAsyncClient.java index 395676253..47fde26fa 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesAsyncClient.java @@ -86,7 +86,7 @@ public ElasticsearchFeaturesAsyncClient withTransportOptions(@Nullable Transport * the plugin that defines that feature must be installed on the master node. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features">Documentation * on elastic.co */ @@ -117,7 +117,7 @@ public CompletableFuture getFeatures(GetFeaturesRequest req * a function that initializes a builder to create the * {@link GetFeaturesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features">Documentation * on elastic.co */ @@ -143,7 +143,7 @@ public final CompletableFuture getFeatures( * the plugin that defines that feature must be installed on the master node. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features">Documentation * on elastic.co */ @@ -180,7 +180,7 @@ public CompletableFuture getFeatures() { * doubts about which plugins are installed on individual nodes. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features">Documentation * on elastic.co */ @@ -220,7 +220,7 @@ public CompletableFuture resetFeatures(ResetFeaturesReque * a function that initializes a builder to create the * {@link ResetFeaturesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features">Documentation * on elastic.co */ @@ -255,7 +255,7 @@ public final CompletableFuture resetFeatures( * doubts about which plugins are installed on individual nodes. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesClient.java index 80e648c91..ac7846bd4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesClient.java @@ -84,7 +84,7 @@ public ElasticsearchFeaturesClient withTransportOptions(@Nullable TransportOptio * the plugin that defines that feature must be installed on the master node. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features">Documentation * on elastic.co */ @@ -115,7 +115,7 @@ public GetFeaturesResponse getFeatures(GetFeaturesRequest request) throws IOExce * a function that initializes a builder to create the * {@link GetFeaturesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features">Documentation * on elastic.co */ @@ -142,7 +142,7 @@ public final GetFeaturesResponse getFeatures( * the plugin that defines that feature must be installed on the master node. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features">Documentation * on elastic.co */ @@ -179,7 +179,7 @@ public GetFeaturesResponse getFeatures() throws IOException, ElasticsearchExcept * doubts about which plugins are installed on individual nodes. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features">Documentation * on elastic.co */ @@ -220,7 +220,7 @@ public ResetFeaturesResponse resetFeatures(ResetFeaturesRequest request) * a function that initializes a builder to create the * {@link ResetFeaturesRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features">Documentation * on elastic.co */ @@ -256,7 +256,7 @@ public final ResetFeaturesResponse resetFeatures( * doubts about which plugins are installed on individual nodes. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/ElasticsearchFleetAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/ElasticsearchFleetAsyncClient.java index 94e8a968d..d30c7a1c8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/ElasticsearchFleetAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/ElasticsearchFleetAsyncClient.java @@ -74,7 +74,9 @@ public ElasticsearchFleetAsyncClient withTransportOptions(@Nullable TransportOpt * where the search will be run only after the provided checkpoint has been * processed and is visible for searches inside of Elasticsearch. 
* - * @see Documentation on elastic.co + * @see Documentation + * on elastic.co */ public CompletableFuture> search(FleetSearchRequest request, @@ -95,7 +97,9 @@ public CompletableFuture> search(Flee * @param fn * a function that initializes a builder to create the * {@link FleetSearchRequest} - * @see Documentation on elastic.co + * @see Documentation + * on elastic.co */ public final CompletableFuture> search( @@ -109,7 +113,9 @@ public final CompletableFuture> searc * where the search will be run only after the provided checkpoint has been * processed and is visible for searches inside of Elasticsearch. * - * @see Documentation on elastic.co + * @see Documentation + * on elastic.co */ public CompletableFuture> search(FleetSearchRequest request, @@ -130,7 +136,9 @@ public CompletableFuture> search(Flee * @param fn * a function that initializes a builder to create the * {@link FleetSearchRequest} - * @see Documentation on elastic.co + * @see Documentation + * on elastic.co */ public final CompletableFuture> search( diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/ElasticsearchFleetClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/ElasticsearchFleetClient.java index 30503174d..6f1cce545 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/ElasticsearchFleetClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/ElasticsearchFleetClient.java @@ -74,7 +74,9 @@ public ElasticsearchFleetClient withTransportOptions(@Nullable TransportOptions * where the search will be run only after the provided checkpoint has been * processed and is visible for searches inside of Elasticsearch. 
* - * @see Documentation on elastic.co + * @see Documentation + * on elastic.co */ public FleetSearchResponse search(FleetSearchRequest request, @@ -95,7 +97,9 @@ public FleetSearchResponse search(FleetSearchRequest requ * @param fn * a function that initializes a builder to create the * {@link FleetSearchRequest} - * @see Documentation on elastic.co + * @see Documentation + * on elastic.co */ public final FleetSearchResponse search( @@ -109,7 +113,9 @@ public final FleetSearchResponse search( * where the search will be run only after the provided checkpoint has been * processed and is visible for searches inside of Elasticsearch. * - * @see Documentation on elastic.co + * @see Documentation + * on elastic.co */ public FleetSearchResponse search(FleetSearchRequest request, Type tDocumentType) @@ -130,7 +136,9 @@ public FleetSearchResponse search(FleetSearchRequest requ * @param fn * a function that initializes a builder to create the * {@link FleetSearchRequest} - * @see Documentation on elastic.co + * @see Documentation + * on elastic.co */ public final FleetSearchResponse search( diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/graph/ElasticsearchGraphAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/graph/ElasticsearchGraphAsyncClient.java index 46ddcc73a..337e3dca7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/graph/ElasticsearchGraphAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/graph/ElasticsearchGraphAsyncClient.java @@ -78,7 +78,7 @@ public ElasticsearchGraphAsyncClient withTransportOptions(@Nullable TransportOpt * interest. You can exclude vertices that have already been returned. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph">Documentation * on elastic.co */ @@ -103,7 +103,7 @@ public CompletableFuture explore(ExploreRequest request) { * a function that initializes a builder to create the * {@link ExploreRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/graph/ElasticsearchGraphClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/graph/ElasticsearchGraphClient.java index 47056e9bc..558b3272f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/graph/ElasticsearchGraphClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/graph/ElasticsearchGraphClient.java @@ -78,7 +78,7 @@ public ElasticsearchGraphClient withTransportOptions(@Nullable TransportOptions * interest. You can exclude vertices that have already been returned. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph">Documentation * on elastic.co */ @@ -103,7 +103,7 @@ public ExploreResponse explore(ExploreRequest request) throws IOException, Elast * a function that initializes a builder to create the * {@link ExploreRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmAsyncClient.java index e33bb0ace..5da069230 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmAsyncClient.java @@ -72,7 +72,7 @@ public ElasticsearchIlmAsyncClient withTransportOptions(@Nullable TransportOptio * returns an error. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle">Documentation * on elastic.co */ @@ -92,7 +92,7 @@ public CompletableFuture deleteLifecycle(DeleteLifecycl * a function that initializes a builder to create the * {@link DeleteLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle">Documentation * on elastic.co */ @@ -112,7 +112,7 @@ public final CompletableFuture deleteLifecycle( * the definition of the running phase, and information about any failures. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle">Documentation * on elastic.co */ @@ -135,7 +135,7 @@ public CompletableFuture explainLifecycle(ExplainLifec * a function that initializes a builder to create the * {@link ExplainLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle">Documentation * on elastic.co */ @@ -150,7 +150,7 @@ public final CompletableFuture explainLifecycle( * Get lifecycle policies. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle">Documentation * on elastic.co */ @@ -168,7 +168,7 @@ public CompletableFuture getLifecycle(GetLifecycleRequest * a function that initializes a builder to create the * {@link GetLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle">Documentation * on elastic.co */ @@ -181,7 +181,7 @@ public final CompletableFuture getLifecycle( * Get lifecycle policies. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle">Documentation * on elastic.co */ @@ -193,10 +193,12 @@ public CompletableFuture getLifecycle() { // ----- Endpoint: ilm.get_status /** - * Get the ILM status. Get the current index lifecycle management status. + * Get the ILM status. + *

+ * Get the current index lifecycle management status. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status">Documentation * on elastic.co */ public CompletableFuture getStatus() { @@ -228,7 +230,7 @@ public CompletableFuture getStatus() { * STOPPED. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers">Documentation * on elastic.co */ @@ -264,7 +266,7 @@ public CompletableFuture migrateToDataTiers(MigrateT * a function that initializes a builder to create the * {@link MigrateToDataTiersRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers">Documentation * on elastic.co */ @@ -295,7 +297,7 @@ public final CompletableFuture migrateToDataTiers( * STOPPED. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers">Documentation * on elastic.co */ @@ -329,7 +331,7 @@ public CompletableFuture migrateToDataTiers() { * index cannot move to a step that is not part of its policy. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step">Documentation * on elastic.co */ @@ -366,7 +368,7 @@ public CompletableFuture moveToStep(MoveToStepRequest reques * a function that initializes a builder to create the * {@link MoveToStepRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step">Documentation * on elastic.co */ @@ -385,7 +387,7 @@ public final CompletableFuture moveToStep( * previous versions. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle">Documentation * on elastic.co */ @@ -407,7 +409,7 @@ public CompletableFuture putLifecycle(PutLifecycleRequest * a function that initializes a builder to create the * {@link PutLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle">Documentation * on elastic.co */ @@ -423,7 +425,7 @@ public final CompletableFuture putLifecycle( * index or a data stream's backing indices. It also stops managing the indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy">Documentation * on elastic.co */ @@ -442,7 +444,7 @@ public CompletableFuture removePolicy(RemovePolicyRequest * a function that initializes a builder to create the * {@link RemovePolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy">Documentation * on elastic.co */ @@ -460,7 +462,7 @@ public final CompletableFuture removePolicy( * whether an index is in the ERROR step. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry">Documentation * on elastic.co */ @@ -481,7 +483,7 @@ public CompletableFuture retry(RetryRequest request) { * a function that initializes a builder to create the * {@link RetryRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry">Documentation * on elastic.co */ @@ -499,7 +501,7 @@ public final CompletableFuture retry( * API. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start">Documentation * on elastic.co */ @@ -520,7 +522,7 @@ public CompletableFuture start(StartIlmRequest request) { * a function that initializes a builder to create the * {@link StartIlmRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start">Documentation * on elastic.co */ @@ -536,7 +538,7 @@ public final CompletableFuture start( * API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start">Documentation * on elastic.co */ @@ -559,7 +561,7 @@ public CompletableFuture start() { * is running. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop">Documentation * on elastic.co */ @@ -585,7 +587,7 @@ public CompletableFuture stop(StopIlmRequest request) { * a function that initializes a builder to create the * {@link StopIlmRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop">Documentation * on elastic.co */ @@ -606,7 +608,7 @@ public final CompletableFuture stop( * is running. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmClient.java index 6b3e3a8f5..cb92a3d3a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmClient.java @@ -73,7 +73,7 @@ public ElasticsearchIlmClient withTransportOptions(@Nullable TransportOptions tr * returns an error. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle">Documentation * on elastic.co */ @@ -94,7 +94,7 @@ public DeleteLifecycleResponse deleteLifecycle(DeleteLifecycleRequest request) * a function that initializes a builder to create the * {@link DeleteLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle">Documentation * on elastic.co */ @@ -115,7 +115,7 @@ public final DeleteLifecycleResponse deleteLifecycle( * the definition of the running phase, and information about any failures. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle">Documentation * on elastic.co */ @@ -139,7 +139,7 @@ public ExplainLifecycleResponse explainLifecycle(ExplainLifecycleRequest request * a function that initializes a builder to create the * {@link ExplainLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle">Documentation * on elastic.co */ @@ -155,7 +155,7 @@ public final ExplainLifecycleResponse explainLifecycle( * Get lifecycle policies. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle">Documentation * on elastic.co */ @@ -173,7 +173,7 @@ public GetLifecycleResponse getLifecycle(GetLifecycleRequest request) throws IOE * a function that initializes a builder to create the * {@link GetLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle">Documentation * on elastic.co */ @@ -187,7 +187,7 @@ public final GetLifecycleResponse getLifecycle( * Get lifecycle policies. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle">Documentation * on elastic.co */ @@ -199,10 +199,12 @@ public GetLifecycleResponse getLifecycle() throws IOException, ElasticsearchExce // ----- Endpoint: ilm.get_status /** - * Get the ILM status. Get the current index lifecycle management status. + * Get the ILM status. + *

+ * Get the current index lifecycle management status. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status">Documentation * on elastic.co */ public GetIlmStatusResponse getStatus() throws IOException, ElasticsearchException { @@ -234,7 +236,7 @@ public GetIlmStatusResponse getStatus() throws IOException, ElasticsearchExcepti * STOPPED. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers">Documentation * on elastic.co */ @@ -271,7 +273,7 @@ public MigrateToDataTiersResponse migrateToDataTiers(MigrateToDataTiersRequest r * a function that initializes a builder to create the * {@link MigrateToDataTiersRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers">Documentation * on elastic.co */ @@ -303,7 +305,7 @@ public final MigrateToDataTiersResponse migrateToDataTiers( * STOPPED. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers">Documentation * on elastic.co */ @@ -337,7 +339,7 @@ public MigrateToDataTiersResponse migrateToDataTiers() throws IOException, Elast * index cannot move to a step that is not part of its policy. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step">Documentation * on elastic.co */ @@ -374,7 +376,7 @@ public MoveToStepResponse moveToStep(MoveToStepRequest request) throws IOExcepti * a function that initializes a builder to create the * {@link MoveToStepRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step">Documentation * on elastic.co */ @@ -393,7 +395,7 @@ public final MoveToStepResponse moveToStep(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle">Documentation * on elastic.co */ @@ -415,7 +417,7 @@ public PutLifecycleResponse putLifecycle(PutLifecycleRequest request) throws IOE * a function that initializes a builder to create the * {@link PutLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle">Documentation * on elastic.co */ @@ -432,7 +434,7 @@ public final PutLifecycleResponse putLifecycle( * index or a data stream's backing indices. It also stops managing the indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy">Documentation * on elastic.co */ @@ -451,7 +453,7 @@ public RemovePolicyResponse removePolicy(RemovePolicyRequest request) throws IOE * a function that initializes a builder to create the * {@link RemovePolicyRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy">Documentation * on elastic.co */ @@ -470,7 +472,7 @@ public final RemovePolicyResponse removePolicy( * whether an index is in the ERROR step. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry">Documentation * on elastic.co */ @@ -491,7 +493,7 @@ public RetryResponse retry(RetryRequest request) throws IOException, Elasticsear * a function that initializes a builder to create the * {@link RetryRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry">Documentation * on elastic.co */ @@ -509,7 +511,7 @@ public final RetryResponse retry(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start">Documentation * on elastic.co */ @@ -530,7 +532,7 @@ public StartIlmResponse start(StartIlmRequest request) throws IOException, Elast * a function that initializes a builder to create the * {@link StartIlmRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start">Documentation * on elastic.co */ @@ -546,7 +548,7 @@ public final StartIlmResponse start(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start">Documentation * on elastic.co */ @@ -569,7 +571,7 @@ public StartIlmResponse start() throws IOException, ElasticsearchException { * is running. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop">Documentation * on elastic.co */ @@ -595,7 +597,7 @@ public StopIlmResponse stop(StopIlmRequest request) throws IOException, Elastics * a function that initializes a builder to create the * {@link StopIlmRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop">Documentation * on elastic.co */ @@ -616,7 +618,7 @@ public final StopIlmResponse stop(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/GetIlmStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/GetIlmStatusRequest.java index b3640093b..6b267387a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/GetIlmStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/GetIlmStatusRequest.java @@ -50,7 +50,9 @@ // typedef: ilm.get_status.Request /** - * Get the ILM status. Get the current index lifecycle management status. + * Get the ILM status. + *

+ * Get the current index lifecycle management status. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/AddBlockRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/AddBlockRequest.java index 575697586..3e0c24f0b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/AddBlockRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/AddBlockRequest.java @@ -61,8 +61,10 @@ // typedef: indices.add_block.Request /** - * Add an index block. Limits the operations allowed on an index by blocking - * specific operation types. + * Add an index block. + *

+ * Add an index block to an index. Index blocks limit the operations allowed on + * an index by blocking specific operation types. * * @see API * specification @@ -106,9 +108,12 @@ public static AddBlockRequest of(Function_all string or when no indices have been - * specified) + * If false, the request returns an error if any wildcard + * expression, index alias, or _all value targets only missing or + * closed indices. This behavior applies even if the request targets other open + * indices. For example, a request targeting foo*,bar* returns an + * error if an index starts with foo but no index starts with + * bar. *

* API name: {@code allow_no_indices} */ @@ -118,7 +123,7 @@ public final Boolean allowNoIndices() { } /** - * Required - The block to add (one of read, write, read_only or metadata) + * Required - The block type to add to the index. *

* API name: {@code block} */ @@ -127,8 +132,10 @@ public final IndicesBlockOptions block() { } /** - * Whether to expand wildcard expression to concrete indices that are open, - * closed or both. + * The type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match + * hidden data streams. It supports comma-separated values, such as + * open,hidden. *

* API name: {@code expand_wildcards} */ @@ -137,8 +144,8 @@ public final List expandWildcards() { } /** - * Whether specified concrete indices should be ignored when unavailable - * (missing or closed) + * If false, the request returns an error if it targets a missing + * or closed index. *

* API name: {@code ignore_unavailable} */ @@ -148,7 +155,13 @@ public final Boolean ignoreUnavailable() { } /** - * Required - A comma separated list of indices to add a block to + * Required - A comma-separated list or wildcard expression of index names used + * to limit the request. By default, you must explicitly name the indices you + * are adding blocks to. To allow the adding of blocks to indices with + * _all, *, or other wildcard expressions, change the + * action.destructive_requires_name setting to false. + * You can update this setting in the elasticsearch.yml file or by + * using the cluster update settings API. *

* API name: {@code index} */ @@ -157,7 +170,10 @@ public final String index() { } /** - * Specify timeout for connection to master + * The period to wait for the master node. If the master node is not available + * before the timeout expires, the request fails and returns an error. It can + * also be set to -1 to indicate that the request should never + * timeout. *

* API name: {@code master_timeout} */ @@ -167,7 +183,11 @@ public final Time masterTimeout() { } /** - * Explicit operation timeout + * The period to wait for a response from all relevant nodes in the cluster + * after updating the cluster metadata. If no response is received before the + * timeout expires, the cluster metadata update still applies but the response + * will indicate that it was not completely acknowledged. It can also be set to + * -1 to indicate that the request should never timeout. *

* API name: {@code timeout} */ @@ -203,9 +223,12 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private Time timeout; /** - * Whether to ignore if a wildcard indices expression resolves into no concrete - * indices. (This includes _all string or when no indices have been - * specified) + * If false, the request returns an error if any wildcard + * expression, index alias, or _all value targets only missing or + * closed indices. This behavior applies even if the request targets other open + * indices. For example, a request targeting foo*,bar* returns an + * error if an index starts with foo but no index starts with + * bar. *

* API name: {@code allow_no_indices} */ @@ -215,7 +238,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { } /** - * Required - The block to add (one of read, write, read_only or metadata) + * Required - The block type to add to the index. *

* API name: {@code block} */ @@ -225,8 +248,10 @@ public final Builder block(IndicesBlockOptions value) { } /** - * Whether to expand wildcard expression to concrete indices that are open, - * closed or both. + * The type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match + * hidden data streams. It supports comma-separated values, such as + * open,hidden. *

* API name: {@code expand_wildcards} *

@@ -238,8 +263,10 @@ public final Builder expandWildcards(List list) { } /** - * Whether to expand wildcard expression to concrete indices that are open, - * closed or both. + * The type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match + * hidden data streams. It supports comma-separated values, such as + * open,hidden. *

* API name: {@code expand_wildcards} *

@@ -251,8 +278,8 @@ public final Builder expandWildcards(ExpandWildcard value, ExpandWildcard... val } /** - * Whether specified concrete indices should be ignored when unavailable - * (missing or closed) + * If false, the request returns an error if it targets a missing + * or closed index. *

* API name: {@code ignore_unavailable} */ @@ -262,7 +289,13 @@ public final Builder ignoreUnavailable(@Nullable Boolean value) { } /** - * Required - A comma separated list of indices to add a block to + * Required - A comma-separated list or wildcard expression of index names used + * to limit the request. By default, you must explicitly name the indices you + * are adding blocks to. To allow the adding of blocks to indices with + * _all, *, or other wildcard expressions, change the + * action.destructive_requires_name setting to false. + * You can update this setting in the elasticsearch.yml file or by + * using the cluster update settings API. *

* API name: {@code index} */ @@ -272,7 +305,10 @@ public final Builder index(String value) { } /** - * Specify timeout for connection to master + * The period to wait for the master node. If the master node is not available + * before the timeout expires, the request fails and returns an error. It can + * also be set to -1 to indicate that the request should never + * time out. *

* API name: {@code master_timeout} */ @@ -282,7 +318,10 @@ public final Builder masterTimeout(@Nullable Time value) { } /** - * Specify timeout for connection to master + * The period to wait for the master node. If the master node is not available + * before the timeout expires, the request fails and returns an error. It can + * also be set to -1 to indicate that the request should never + * time out. *

* API name: {@code master_timeout} */ @@ -291,7 +330,11 @@ public final Builder masterTimeout(Function> f } /** - * Explicit operation timeout + * The period to wait for a response from all relevant nodes in the cluster + * after updating the cluster metadata. If no response is received before the + * timeout expires, the cluster metadata update still applies but the response + * will indicate that it was not completely acknowledged. It can also be set to + * -1 to indicate that the request should never time out. *

* API name: {@code timeout} */ @@ -301,7 +344,11 @@ public final Builder timeout(@Nullable Time value) { } /** - * Explicit operation timeout + * The period to wait for a response from all relevant nodes in the cluster + * after updating the cluster metadata. If no response is received before the + * timeout expires, the cluster metadata update still applies but the response + * will indicate that it was not completely acknowledged. It can also be set to + * -1 to indicate that the request should never time out. *

* API name: {@code timeout} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CancelMigrateReindexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CancelMigrateReindexRequest.java new file mode 100644 index 000000000..e87f7eeef --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CancelMigrateReindexRequest.java @@ -0,0 +1,202 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices.cancel_migrate_reindex.Request + +/** + * Cancel a migration reindex operation. + *

+ * Cancel a migration reindex attempt for a data stream or index. + * + * @see API + * specification + */ + +public class CancelMigrateReindexRequest extends RequestBase { + private final List index; + + // --------------------------------------------------------------------------------------------- + + private CancelMigrateReindexRequest(Builder builder) { + + this.index = ApiTypeHelper.unmodifiableRequired(builder.index, this, "index"); + + } + + public static CancelMigrateReindexRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The index or data stream name + *

+ * API name: {@code index} + */ + public final List index() { + return this.index; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link CancelMigrateReindexRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private List index; + + /** + * Required - The index or data stream name + *

+ * API name: {@code index} + *

+ * Adds all elements of list to index. + */ + public final Builder index(List list) { + this.index = _listAddAll(this.index, list); + return this; + } + + /** + * Required - The index or data stream name + *

+ * API name: {@code index} + *

+ * Adds one or more values to index. + */ + public final Builder index(String value, String... values) { + this.index = _listAdd(this.index, value, values); + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link CancelMigrateReindexRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public CancelMigrateReindexRequest build() { + _checkSingleUse(); + + return new CancelMigrateReindexRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code indices.cancel_migrate_reindex}". + */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/indices.cancel_migrate_reindex", + + // Request method + request -> { + return "POST"; + + }, + + // Request path + request -> { + final int _index = 1 << 0; + + int propsSet = 0; + + propsSet |= _index; + + if (propsSet == (_index)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_migration"); + buf.append("/reindex"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.index.stream().map(v -> v).collect(Collectors.joining(",")), buf); + buf.append("/_cancel"); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _index = 1 << 0; + + int propsSet = 0; + + propsSet |= _index; + + if (propsSet == (_index)) { + params.put("index", request.index.stream().map(v -> v).collect(Collectors.joining(","))); + } + return params; + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), false, CancelMigrateReindexResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CancelMigrateReindexResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CancelMigrateReindexResponse.java new 
file mode 100644 index 000000000..3a8d7445c --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CancelMigrateReindexResponse.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.AcknowledgedResponseBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.util.Objects; +import java.util.function.Function; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. 
+// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices.cancel_migrate_reindex.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class CancelMigrateReindexResponse extends AcknowledgedResponseBase { + // --------------------------------------------------------------------------------------------- + + private CancelMigrateReindexResponse(Builder builder) { + super(builder); + + } + + public static CancelMigrateReindexResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link CancelMigrateReindexResponse}. + */ + + public static class Builder extends AcknowledgedResponseBase.AbstractBuilder + implements + ObjectBuilder { + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link CancelMigrateReindexResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. 
+ */ + public CancelMigrateReindexResponse build() { + _checkSingleUse(); + + return new CancelMigrateReindexResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link CancelMigrateReindexResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, CancelMigrateReindexResponse::setupCancelMigrateReindexResponseDeserializer); + + protected static void setupCancelMigrateReindexResponseDeserializer( + ObjectDeserializer op) { + AcknowledgedResponseBase.setupAcknowledgedResponseBaseDeserializer(op); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateDataStreamRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateDataStreamRequest.java index a93f1bbe0..5bdd675ca 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateDataStreamRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateDataStreamRequest.java @@ -56,8 +56,9 @@ // typedef: indices.create_data_stream.Request /** - * Create a data stream. Creates a data stream. You must have a matching index - * template with data stream enabled. + * Create a data stream. + *

+ * You must have a matching index template with data stream enabled. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateFromRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateFromRequest.java new file mode 100644 index 000000000..9af879ca7 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateFromRequest.java @@ -0,0 +1,271 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch.indices.create_from.CreateFrom; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import jakarta.json.stream.JsonParser; +import java.lang.String; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices.create_from.Request + +/** + * Create an index from a source index. + *

+ * Copy the mappings and settings from the source index to a destination index + * while allowing request settings and mappings to override the source values. + * + * @see API + * specification + */ +@JsonpDeserializable +public class CreateFromRequest extends RequestBase implements JsonpSerializable { + private final String dest; + + private final String source; + + private final CreateFrom createFrom; + + // --------------------------------------------------------------------------------------------- + + private CreateFromRequest(Builder builder) { + + this.dest = ApiTypeHelper.requireNonNull(builder.dest, this, "dest"); + this.source = ApiTypeHelper.requireNonNull(builder.source, this, "source"); + this.createFrom = ApiTypeHelper.requireNonNull(builder.createFrom, this, "createFrom"); + + } + + public static CreateFromRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The destination index or data stream name + *

+ * API name: {@code dest} + */ + public final String dest() { + return this.dest; + } + + /** + * Required - The source index or data stream name + *

+ * API name: {@code source} + */ + public final String source() { + return this.source; + } + + /** + * Required - Request body. + */ + public final CreateFrom createFrom() { + return this.createFrom; + } + + /** + * Serialize this value to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + this.createFrom.serialize(generator, mapper); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link CreateFromRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private String dest; + + private String source; + + private CreateFrom createFrom; + + /** + * Required - The destination index or data stream name + *

+ * API name: {@code dest} + */ + public final Builder dest(String value) { + this.dest = value; + return this; + } + + /** + * Required - The source index or data stream name + *

+ * API name: {@code source} + */ + public final Builder source(String value) { + this.source = value; + return this; + } + + /** + * Required - Request body. + */ + public final Builder createFrom(CreateFrom value) { + this.createFrom = value; + return this; + } + + /** + * Required - Request body. + */ + public final Builder createFrom(Function> fn) { + return this.createFrom(fn.apply(new CreateFrom.Builder()).build()); + } + + @Override + public Builder withJson(JsonParser parser, JsonpMapper mapper) { + + @SuppressWarnings("unchecked") + CreateFrom value = (CreateFrom) CreateFrom._DESERIALIZER.deserialize(parser, mapper); + return this.createFrom(value); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link CreateFromRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public CreateFromRequest build() { + _checkSingleUse(); + + return new CreateFromRequest(this); + } + } + + public static final JsonpDeserializer _DESERIALIZER = createCreateFromRequestDeserializer(); + protected static JsonpDeserializer createCreateFromRequestDeserializer() { + + JsonpDeserializer valueDeserializer = CreateFrom._DESERIALIZER; + + return JsonpDeserializer.of(valueDeserializer.acceptedEvents(), (parser, mapper, event) -> new Builder() + .createFrom(valueDeserializer.deserialize(parser, mapper, event)).build()); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code indices.create_from}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/indices.create_from", + + // Request method + request -> { + return "PUT"; + + }, + + // Request path + request -> { + final int _source = 1 << 0; + final int _dest = 1 << 1; + + int propsSet = 0; + + propsSet |= _source; + propsSet |= _dest; + + if (propsSet == (_source | _dest)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_create_from"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.source, buf); + buf.append("/"); + SimpleEndpoint.pathEncode(request.dest, buf); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _source = 1 << 0; + final int _dest = 1 << 1; + + int propsSet = 0; + + propsSet |= _source; + propsSet |= _dest; + + if (propsSet == (_source | _dest)) { + params.put("source", request.source); + params.put("dest", request.dest); + } + return params; + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), true, CreateFromResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/UnfreezeResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateFromResponse.java similarity index 75% rename from java-client/src/main/java/co/elastic/clients/elasticsearch/indices/UnfreezeResponse.java rename to java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateFromResponse.java index 4eb4b3699..c40c79461 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/UnfreezeResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateFromResponse.java @@ -32,8 +32,10 @@ import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; import java.lang.Boolean; +import java.lang.String; import java.util.Objects; import 
java.util.function.Function; +import javax.annotation.Nullable; //---------------------------------------------------------------- // THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. @@ -50,29 +52,32 @@ // //---------------------------------------------------------------- -// typedef: indices.unfreeze.Response +// typedef: indices.create_from.Response /** * - * @see API + * @see API * specification */ @JsonpDeserializable -public class UnfreezeResponse implements AcknowledgedResponse, JsonpSerializable { +public class CreateFromResponse implements AcknowledgedResponse, JsonpSerializable { private final boolean acknowledged; + private final String index; + private final boolean shardsAcknowledged; // --------------------------------------------------------------------------------------------- - private UnfreezeResponse(Builder builder) { + private CreateFromResponse(Builder builder) { this.acknowledged = ApiTypeHelper.requireNonNull(builder.acknowledged, this, "acknowledged"); + this.index = ApiTypeHelper.requireNonNull(builder.index, this, "index"); this.shardsAcknowledged = ApiTypeHelper.requireNonNull(builder.shardsAcknowledged, this, "shardsAcknowledged"); } - public static UnfreezeResponse of(Function> fn) { + public static CreateFromResponse of(Function> fn) { return fn.apply(new Builder()).build(); } @@ -83,6 +88,13 @@ public final boolean acknowledged() { return this.acknowledged; } + /** + * Required - API name: {@code index} + */ + public final String index() { + return this.index; + } + /** * Required - API name: {@code shards_acknowledged} */ @@ -104,6 +116,9 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("acknowledged"); generator.write(this.acknowledged); + generator.writeKey("index"); + generator.write(this.index); + generator.writeKey("shards_acknowledged"); generator.write(this.shardsAcknowledged); @@ -117,12 +132,16 @@ public String toString() { // 
--------------------------------------------------------------------------------------------- /** - * Builder for {@link UnfreezeResponse}. + * Builder for {@link CreateFromResponse}. */ - public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { private Boolean acknowledged; + private String index; + private Boolean shardsAcknowledged; /** @@ -133,6 +152,14 @@ public final Builder acknowledged(boolean value) { return this; } + /** + * Required - API name: {@code index} + */ + public final Builder index(String value) { + this.index = value; + return this; + } + /** * Required - API name: {@code shards_acknowledged} */ @@ -147,29 +174,30 @@ protected Builder self() { } /** - * Builds a {@link UnfreezeResponse}. + * Builds a {@link CreateFromResponse}. * * @throws NullPointerException * if some of the required fields are null. */ - public UnfreezeResponse build() { + public CreateFromResponse build() { _checkSingleUse(); - return new UnfreezeResponse(this); + return new CreateFromResponse(this); } } // --------------------------------------------------------------------------------------------- /** - * Json deserializer for {@link UnfreezeResponse} + * Json deserializer for {@link CreateFromResponse} */ - public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, - UnfreezeResponse::setupUnfreezeResponseDeserializer); + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, CreateFromResponse::setupCreateFromResponseDeserializer); - protected static void setupUnfreezeResponseDeserializer(ObjectDeserializer op) { + protected static void setupCreateFromResponseDeserializer(ObjectDeserializer op) { op.add(Builder::acknowledged, JsonpDeserializer.booleanDeserializer(), "acknowledged"); + op.add(Builder::index, JsonpDeserializer.stringDeserializer(), "index"); 
op.add(Builder::shardsAcknowledged, JsonpDeserializer.booleanDeserializer(), "shards_acknowledged"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamsStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamsStatsRequest.java index 04e7ed9f3..64b33ea80 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamsStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamsStatsRequest.java @@ -58,7 +58,9 @@ // typedef: indices.data_streams_stats.Request /** - * Get data stream stats. Retrieves statistics for one or more data streams. + * Get data stream stats. + *

+ * Get statistics for one or more data streams. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java index 4f98d946d..f27da3611 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java @@ -71,11 +71,13 @@ public ElasticsearchIndicesAsyncClient withTransportOptions(@Nullable TransportO // ----- Endpoint: indices.add_block /** - * Add an index block. Limits the operations allowed on an index by blocking - * specific operation types. + * Add an index block. + *

+ * Add an index block to an index. Index blocks limit the operations allowed on + * an index by blocking specific operation types. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block">Documentation * on elastic.co */ @@ -87,14 +89,16 @@ public CompletableFuture addBlock(AddBlockRequest request) { } /** - * Add an index block. Limits the operations allowed on an index by blocking - * specific operation types. + * Add an index block. + *

+ * Add an index block to an index. Index blocks limit the operations allowed on + * an index by blocking specific operation types. * * @param fn * a function that initializes a builder to create the * {@link AddBlockRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block">Documentation * on elastic.co */ @@ -116,7 +120,7 @@ public final CompletableFuture addBlock( * specified index will always use 10000 as its limit. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze">Documentation * on elastic.co */ @@ -141,7 +145,7 @@ public CompletableFuture analyze(AnalyzeRequest request) { * a function that initializes a builder to create the * {@link AnalyzeRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze">Documentation * on elastic.co */ @@ -161,7 +165,7 @@ public final CompletableFuture analyze( * specified index will always use 10000 as its limit. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze">Documentation * on elastic.co */ @@ -170,6 +174,43 @@ public CompletableFuture analyze() { this.transportOptions); } + // ----- Endpoint: indices.cancel_migrate_reindex + + /** + * Cancel a migration reindex operation. + *

+ * Cancel a migration reindex attempt for a data stream or index. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture cancelMigrateReindex(CancelMigrateReindexRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) CancelMigrateReindexRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Cancel a migration reindex operation. + *

+ * Cancel a migration reindex attempt for a data stream or index. + * + * @param fn + * a function that initializes a builder to create the + * {@link CancelMigrateReindexRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture cancelMigrateReindex( + Function> fn) { + return cancelMigrateReindex(fn.apply(new CancelMigrateReindexRequest.Builder()).build()); + } + // ----- Endpoint: indices.clear_cache /** @@ -182,7 +223,7 @@ public CompletableFuture analyze() { * use the fields parameter. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache">Documentation * on elastic.co */ @@ -206,7 +247,7 @@ public CompletableFuture clearCache(ClearCacheRequest reques * a function that initializes a builder to create the * {@link ClearCacheRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache">Documentation * on elastic.co */ @@ -225,7 +266,7 @@ public final CompletableFuture clearCache( * use the fields parameter. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache">Documentation * on elastic.co */ @@ -306,7 +347,7 @@ public CompletableFuture clearCache() { * action as well. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone">Documentation * on elastic.co */ @@ -390,7 +431,7 @@ public CompletableFuture clone(CloneIndexRequest request) { * a function that initializes a builder to create the * {@link CloneIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone">Documentation * on elastic.co */ @@ -431,7 +472,7 @@ public final CompletableFuture clone( * false. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close">Documentation * on elastic.co */ @@ -475,7 +516,7 @@ public CompletableFuture close(CloseIndexRequest request) { * a function that initializes a builder to create the * {@link CloseIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close">Documentation * on elastic.co */ @@ -521,7 +562,7 @@ public final CompletableFuture close( * wait_for_active_shards value on all subsequent write operations. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create">Documentation * on elastic.co */ @@ -570,7 +611,7 @@ public CompletableFuture create(CreateIndexRequest request) * a function that initializes a builder to create the * {@link CreateIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create">Documentation * on elastic.co */ @@ -582,11 +623,12 @@ public final CompletableFuture create( // ----- Endpoint: indices.create_data_stream /** - * Create a data stream. Creates a data stream. You must have a matching index - * template with data stream enabled. + * Create a data stream. + *

+ * You must have a matching index template with data stream enabled. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream">Documentation * on elastic.co */ @@ -598,14 +640,15 @@ public CompletableFuture createDataStream(CreateDataSt } /** - * Create a data stream. Creates a data stream. You must have a matching index - * template with data stream enabled. + * Create a data stream. + *

+ * You must have a matching index template with data stream enabled. * * @param fn * a function that initializes a builder to create the * {@link CreateDataStreamRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream">Documentation * on elastic.co */ @@ -614,13 +657,54 @@ public final CompletableFuture createDataStream( return createDataStream(fn.apply(new CreateDataStreamRequest.Builder()).build()); } + // ----- Endpoint: indices.create_from + + /** + * Create an index from a source index. + *

+ * Copy the mappings and settings from the source index to a destination index + * while allowing request settings and mappings to override the source values. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture createFrom(CreateFromRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) CreateFromRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Create an index from a source index. + *

+ * Copy the mappings and settings from the source index to a destination index + * while allowing request settings and mappings to override the source values. + * + * @param fn + * a function that initializes a builder to create the + * {@link CreateFromRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture createFrom( + Function> fn) { + return createFrom(fn.apply(new CreateFromRequest.Builder()).build()); + } + // ----- Endpoint: indices.data_streams_stats /** - * Get data stream stats. Retrieves statistics for one or more data streams. + * Get data stream stats. + *

+ * Get statistics for one or more data streams. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1">Documentation * on elastic.co */ @@ -632,13 +716,15 @@ public CompletableFuture dataStreamsStats(DataStreamsS } /** - * Get data stream stats. Retrieves statistics for one or more data streams. + * Get data stream stats. + *

+ * Get statistics for one or more data streams. * * @param fn * a function that initializes a builder to create the * {@link DataStreamsStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1">Documentation * on elastic.co */ @@ -648,10 +734,12 @@ public final CompletableFuture dataStreamsStats( } /** - * Get data stream stats. Retrieves statistics for one or more data streams. + * Get data stream stats. + *

+ * Get statistics for one or more data streams. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1">Documentation * on elastic.co */ @@ -672,7 +760,7 @@ public CompletableFuture dataStreamsStats() { * You can then use the delete index API to delete the previous write index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete">Documentation * on elastic.co */ @@ -696,7 +784,7 @@ public CompletableFuture delete(DeleteIndexRequest request) * a function that initializes a builder to create the * {@link DeleteIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete">Documentation * on elastic.co */ @@ -711,7 +799,7 @@ public final CompletableFuture delete( * Delete an alias. Removes a data stream or index from an alias. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias">Documentation * on elastic.co */ @@ -729,7 +817,7 @@ public CompletableFuture deleteAlias(DeleteAliasRequest req * a function that initializes a builder to create the * {@link DeleteAliasRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias">Documentation * on elastic.co */ @@ -745,7 +833,7 @@ public final CompletableFuture deleteAlias( * stream, rendering it not managed by the data stream lifecycle. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle">Documentation * on elastic.co */ @@ -764,7 +852,7 @@ public CompletableFuture deleteDataLifecycle(Delete * a function that initializes a builder to create the * {@link DeleteDataLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle">Documentation * on elastic.co */ @@ -780,7 +868,7 @@ public final CompletableFuture deleteDataLifecycle( * indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream">Documentation * on elastic.co */ @@ -799,7 +887,7 @@ public CompletableFuture deleteDataStream(DeleteDataSt * a function that initializes a builder to create the * {@link DeleteDataStreamRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream">Documentation * on elastic.co */ @@ -817,7 +905,7 @@ public final CompletableFuture deleteDataStream( * match completely with existing templates. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template">Documentation * on elastic.co */ @@ -838,7 +926,7 @@ public CompletableFuture deleteIndexTemplate(Delete * a function that initializes a builder to create the * {@link DeleteIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template">Documentation * on elastic.co */ @@ -853,7 +941,7 @@ public final CompletableFuture deleteIndexTemplate( * Delete a legacy index template. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template">Documentation * on elastic.co */ @@ -871,7 +959,7 @@ public CompletableFuture deleteTemplate(DeleteTemplateRe * a function that initializes a builder to create the * {@link DeleteTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template">Documentation * on elastic.co */ @@ -897,7 +985,7 @@ public final CompletableFuture deleteTemplate( * underestimated while the _source field is overestimated. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage">Documentation * on elastic.co */ @@ -926,7 +1014,7 @@ public CompletableFuture diskUsage(DiskUsageRequest request) * a function that initializes a builder to create the * {@link DiskUsageRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage">Documentation * on elastic.co */ @@ -951,7 +1039,7 @@ public final CompletableFuture diskUsage( * index must be read only (index.blocks.write: true). * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample">Documentation * on elastic.co */ @@ -979,7 +1067,7 @@ public CompletableFuture downsample(DownsampleRequest reques * a function that initializes a builder to create the * {@link DownsampleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample">Documentation * on elastic.co */ @@ -995,7 +1083,7 @@ public final CompletableFuture downsample( * exist. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists">Documentation * on elastic.co */ @@ -1014,7 +1102,7 @@ public CompletableFuture exists(ExistsRequest request) { * a function that initializes a builder to create the * {@link ExistsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists">Documentation * on elastic.co */ @@ -1026,10 +1114,12 @@ public final CompletableFuture exists( // ----- Endpoint: indices.exists_alias /** - * Check aliases. Checks if one or more data stream or index aliases exist. + * Check aliases. + *

+ * Check if one or more data stream or index aliases exist. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias">Documentation * on elastic.co */ @@ -1041,13 +1131,15 @@ public CompletableFuture existsAlias(ExistsAliasRequest request } /** - * Check aliases. Checks if one or more data stream or index aliases exist. + * Check aliases. + *

+ * Check if one or more data stream or index aliases exist. * * @param fn * a function that initializes a builder to create the * {@link ExistsAliasRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias">Documentation * on elastic.co */ @@ -1059,10 +1151,12 @@ public final CompletableFuture existsAlias( // ----- Endpoint: indices.exists_index_template /** - * Check index templates. Check whether index templates exist. + * Check index templates. + *

+ * Check whether index templates exist. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template">Documentation * on elastic.co */ @@ -1074,13 +1168,15 @@ public CompletableFuture existsIndexTemplate(ExistsIndexTemplat } /** - * Check index templates. Check whether index templates exist. + * Check index templates. + *

+ * Check whether index templates exist. * * @param fn * a function that initializes a builder to create the * {@link ExistsIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template">Documentation * on elastic.co */ @@ -1101,7 +1197,7 @@ public final CompletableFuture existsIndexTemplate( * Elasticsearch 7.8. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template">Documentation * on elastic.co */ @@ -1125,7 +1221,7 @@ public CompletableFuture existsTemplate(ExistsTemplateRequest r * a function that initializes a builder to create the * {@link ExistsTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template">Documentation * on elastic.co */ @@ -1143,7 +1239,7 @@ public final CompletableFuture existsTemplate( * index, or any errors encountered during lifecycle execution. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle">Documentation * on elastic.co */ @@ -1164,7 +1260,7 @@ public CompletableFuture explainDataLifecycle(Expl * a function that initializes a builder to create the * {@link ExplainDataLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle">Documentation * on elastic.co */ @@ -1188,7 +1284,7 @@ public final CompletableFuture explainDataLifecycl * times. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats">Documentation * on elastic.co */ @@ -1215,7 +1311,7 @@ public CompletableFuture fieldUsageStats(FieldUsageStat * a function that initializes a builder to create the * {@link FieldUsageStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats">Documentation * on elastic.co */ @@ -1249,7 +1345,7 @@ public final CompletableFuture fieldUsageStats( * before the flush API was called. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush">Documentation * on elastic.co */ @@ -1286,7 +1382,7 @@ public CompletableFuture flush(FlushRequest request) { * a function that initializes a builder to create the * {@link FlushRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush">Documentation * on elastic.co */ @@ -1318,7 +1414,7 @@ public final CompletableFuture flush( * before the flush API was called. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush">Documentation * on elastic.co */ @@ -1405,7 +1501,7 @@ public CompletableFuture flush() { * * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge">Documentation * on elastic.co */ @@ -1495,7 +1591,7 @@ public CompletableFuture forcemerge(ForcemergeRequest reques * a function that initializes a builder to create the * {@link ForcemergeRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge">Documentation * on elastic.co */ @@ -1580,7 +1676,7 @@ public final CompletableFuture forcemerge( * * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge">Documentation * on elastic.co */ @@ -1596,7 +1692,7 @@ public CompletableFuture forcemerge() { * streams, the API returns information about the stream’s backing indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get">Documentation * on elastic.co */ @@ -1615,7 +1711,7 @@ public CompletableFuture get(GetIndexRequest request) { * a function that initializes a builder to create the * {@link GetIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get">Documentation * on elastic.co */ @@ -1630,7 +1726,9 @@ public final CompletableFuture get( * Get aliases. Retrieves information for one or more data stream or index * aliases. 
* - * @see Documentation on elastic.co + * @see Documentation + * on elastic.co */ public CompletableFuture getAlias(GetAliasRequest request) { @@ -1647,7 +1745,9 @@ public CompletableFuture getAlias(GetAliasRequest request) { * @param fn * a function that initializes a builder to create the * {@link GetAliasRequest} - * @see Documentation on elastic.co + * @see Documentation + * on elastic.co */ public final CompletableFuture getAlias( @@ -1659,7 +1759,9 @@ public final CompletableFuture getAlias( * Get aliases. Retrieves information for one or more data stream or index * aliases. * - * @see Documentation on elastic.co + * @see Documentation + * on elastic.co */ public CompletableFuture getAlias() { @@ -1670,11 +1772,12 @@ public CompletableFuture getAlias() { // ----- Endpoint: indices.get_data_lifecycle /** - * Get data stream lifecycles. Retrieves the data stream lifecycle configuration - * of one or more data streams. + * Get data stream lifecycles. + *

+ * Get the data stream lifecycle configuration of one or more data streams. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle">Documentation * on elastic.co */ @@ -1686,14 +1789,15 @@ public CompletableFuture getDataLifecycle(GetDataLifec } /** - * Get data stream lifecycles. Retrieves the data stream lifecycle configuration - * of one or more data streams. + * Get data stream lifecycles. + *

+ * Get the data stream lifecycle configuration of one or more data streams. * * @param fn * a function that initializes a builder to create the * {@link GetDataLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle">Documentation * on elastic.co */ @@ -1709,7 +1813,7 @@ public final CompletableFuture getDataLifecycle( * are managed by a data stream lifecycle. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats">Documentation * on elastic.co */ public CompletableFuture getDataLifecycleStats() { @@ -1720,10 +1824,12 @@ public CompletableFuture getDataLifecycleStats() // ----- Endpoint: indices.get_data_stream /** - * Get data streams. Retrieves information about one or more data streams. + * Get data streams. + *

+ * Get information about one or more data streams. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream">Documentation * on elastic.co */ @@ -1735,13 +1841,15 @@ public CompletableFuture getDataStream(GetDataStreamReque } /** - * Get data streams. Retrieves information about one or more data streams. + * Get data streams. + *

+ * Get information about one or more data streams. * * @param fn * a function that initializes a builder to create the * {@link GetDataStreamRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream">Documentation * on elastic.co */ @@ -1751,10 +1859,12 @@ public final CompletableFuture getDataStream( } /** - * Get data streams. Retrieves information about one or more data streams. + * Get data streams. + *

+ * Get information about one or more data streams. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream">Documentation * on elastic.co */ @@ -1774,7 +1884,7 @@ public CompletableFuture getDataStream() { * mapping contains a large number of fields. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping">Documentation * on elastic.co */ @@ -1797,7 +1907,7 @@ public CompletableFuture getFieldMapping(GetFieldMappin * a function that initializes a builder to create the * {@link GetFieldMappingRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping">Documentation * on elastic.co */ @@ -1812,7 +1922,7 @@ public final CompletableFuture getFieldMapping( * Get index templates. Get information about one or more index templates. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template">Documentation * on elastic.co */ @@ -1830,7 +1940,7 @@ public CompletableFuture getIndexTemplate(GetIndexTemp * a function that initializes a builder to create the * {@link GetIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template">Documentation * on elastic.co */ @@ -1843,7 +1953,7 @@ public final CompletableFuture getIndexTemplate( * Get index templates. Get information about one or more index templates. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template">Documentation * on elastic.co */ @@ -1859,7 +1969,7 @@ public CompletableFuture getIndexTemplate() { * stream’s backing indices. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping">Documentation * on elastic.co */ @@ -1878,7 +1988,7 @@ public CompletableFuture getMapping(GetMappingRequest reques * a function that initializes a builder to create the * {@link GetMappingRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping">Documentation * on elastic.co */ @@ -1892,7 +2002,7 @@ public final CompletableFuture getMapping( * stream’s backing indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping">Documentation * on elastic.co */ @@ -1901,6 +2011,44 @@ public CompletableFuture getMapping() { this.transportOptions); } + // ----- Endpoint: indices.get_migrate_reindex_status + + /** + * Get the migration reindexing status. + *

+ * Get the status of a migration reindex attempt for a data stream or index. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture getMigrateReindexStatus( + GetMigrateReindexStatusRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) GetMigrateReindexStatusRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Get the migration reindexing status. + *

+ * Get the status of a migration reindex attempt for a data stream or index. + * + * @param fn + * a function that initializes a builder to create the + * {@link GetMigrateReindexStatusRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture getMigrateReindexStatus( + Function> fn) { + return getMigrateReindexStatus(fn.apply(new GetMigrateReindexStatusRequest.Builder()).build()); + } + // ----- Endpoint: indices.get_settings /** @@ -1908,7 +2056,7 @@ public CompletableFuture getMapping() { * streams, it returns setting information for the stream's backing indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings">Documentation * on elastic.co */ @@ -1927,7 +2075,7 @@ public CompletableFuture getSettings(GetIndicesSetti * a function that initializes a builder to create the * {@link GetIndicesSettingsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings">Documentation * on elastic.co */ @@ -1941,7 +2089,7 @@ public final CompletableFuture getSettings( * streams, it returns setting information for the stream's backing indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings">Documentation * on elastic.co */ @@ -1960,7 +2108,7 @@ public CompletableFuture getSettings() { * Elasticsearch 7.8. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template">Documentation * on elastic.co */ @@ -1982,7 +2130,7 @@ public CompletableFuture getTemplate(GetTemplateRequest req * a function that initializes a builder to create the * {@link GetTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template">Documentation * on elastic.co */ @@ -1999,7 +2147,7 @@ public final CompletableFuture getTemplate( * Elasticsearch 7.8. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template">Documentation * on elastic.co */ @@ -2008,6 +2156,64 @@ public CompletableFuture getTemplate() { GetTemplateRequest._ENDPOINT, this.transportOptions); } + // ----- Endpoint: indices.migrate_reindex + + /** + * Reindex legacy backing indices. + *

+ * Reindex all legacy backing indices for a data stream. This operation occurs + * in a persistent task. The persistent task ID is returned immediately and the + * reindexing work is completed in that task. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture migrateReindex(MigrateReindexRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) MigrateReindexRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Reindex legacy backing indices. + *

+ * Reindex all legacy backing indices for a data stream. This operation occurs + * in a persistent task. The persistent task ID is returned immediately and the + * reindexing work is completed in that task. + * + * @param fn + * a function that initializes a builder to create the + * {@link MigrateReindexRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture migrateReindex( + Function> fn) { + return migrateReindex(fn.apply(new MigrateReindexRequest.Builder()).build()); + } + + /** + * Reindex legacy backing indices. + *

+ * Reindex all legacy backing indices for a data stream. This operation occurs + * in a persistent task. The persistent task ID is returned immediately and the + * reindexing work is completed in that task. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture migrateReindex() { + return this.transport.performRequestAsync(new MigrateReindexRequest.Builder().build(), + MigrateReindexRequest._ENDPOINT, this.transportOptions); + } + // ----- Endpoint: indices.migrate_to_data_stream /** @@ -2130,7 +2336,7 @@ public final CompletableFuture modifyDataStream( * _open and _close index actions as well. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open">Documentation * on elastic.co */ @@ -2180,7 +2386,7 @@ public CompletableFuture open(OpenRequest request) { * a function that initializes a builder to create the * {@link OpenRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open">Documentation * on elastic.co */ @@ -2472,7 +2678,7 @@ public final CompletableFuture putIndexTemplate( * name. Instead, add an alias field to create an alternate field name. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping">Documentation * on elastic.co */ @@ -2524,7 +2730,7 @@ public CompletableFuture putMapping(PutMappingRequest reques * a function that initializes a builder to create the * {@link PutMappingRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping">Documentation * on elastic.co */ @@ -2557,7 +2763,7 @@ public final CompletableFuture putMapping( * indices, you must create a new data stream and reindex your data into it. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings">Documentation * on elastic.co */ @@ -2593,7 +2799,7 @@ public CompletableFuture putSettings(PutIndicesSetti * a function that initializes a builder to create the * {@link PutIndicesSettingsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings">Documentation * on elastic.co */ @@ -2624,7 +2830,7 @@ public final CompletableFuture putSettings( * indices, you must create a new data stream and reindex your data into it. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings">Documentation * on elastic.co */ @@ -2668,7 +2874,7 @@ public CompletableFuture putSettings() { * non-deterministic merging order. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template">Documentation * on elastic.co */ @@ -2715,7 +2921,7 @@ public CompletableFuture putTemplate(PutTemplateRequest req * a function that initializes a builder to create the * {@link PutTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template">Documentation * on elastic.co */ @@ -2763,7 +2969,7 @@ public final CompletableFuture putTemplate( * the recovery API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery">Documentation * on elastic.co */ @@ -2814,7 +3020,7 @@ public CompletableFuture recovery(RecoveryRequest request) { * a function that initializes a builder to create the * {@link RecoveryRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery">Documentation * on elastic.co */ @@ -2860,7 +3066,7 @@ public final CompletableFuture recovery( * the recovery API. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery">Documentation * on elastic.co */ @@ -2894,7 +3100,7 @@ public CompletableFuture recovery() { * indexing operation waits for a periodic refresh before running the search. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh">Documentation * on elastic.co */ @@ -2931,7 +3137,7 @@ public CompletableFuture refresh(RefreshRequest request) { * a function that initializes a builder to create the * {@link RefreshRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh">Documentation * on elastic.co */ @@ -2963,7 +3169,7 @@ public final CompletableFuture refresh( * indexing operation waits for a periodic refresh before running the search. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh">Documentation * on elastic.co */ @@ -2999,7 +3205,7 @@ public CompletableFuture refresh() { * in case shards are relocated in the future. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers">Documentation * on elastic.co */ @@ -3039,7 +3245,7 @@ public CompletableFuture reloadSearchAnalyzers( * a function that initializes a builder to create the * {@link ReloadSearchAnalyzersRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers">Documentation * on elastic.co */ @@ -3051,9 +3257,12 @@ public final CompletableFuture reloadSearchAnalyz // ----- Endpoint: indices.resolve_cluster /** - * Resolve the cluster. Resolve the specified index expressions to return - * information about each cluster, including the local cluster, if included. - * Multiple patterns and remote clusters are supported. + * Resolve the cluster. + *

+ * Resolve the specified index expressions to return information about each + * cluster, including the local "querying" cluster, if included. If no + * index expression is provided, the API will return information about all the + * remote clusters that are configured on the querying cluster. *

* This endpoint is useful before doing a cross-cluster search in order to * determine which remote clusters should be included in a search. @@ -3065,7 +3274,9 @@ public final CompletableFuture reloadSearchAnalyz * For each cluster in the index expression, information is returned about: *

    *
  • Whether the querying ("local") cluster is currently connected - * to each remote cluster in the index expression scope.
  • + * to each remote cluster specified in the index expression. Note that this + * endpoint actively attempts to contact the remote clusters, unlike the + * remote/info endpoint. *
  • Whether each remote cluster is configured with * skip_unavailable as true or * false.
  • @@ -3084,9 +3295,15 @@ public final CompletableFuture reloadSearchAnalyz * start with the alias cluster*. Each cluster returns information * about whether it has any indices, aliases or data streams that match * my-index-*. + *

    Note on backwards compatibility

    *

    - * Advantages of using this endpoint before a cross-cluster - * search + * The ability to query without an index expression was added in version 8.18, + * so when querying remote clusters older than that, the local cluster will send + * the index expression dummy* to those remote clusters. Thus, if + * an errors occur, you may see a reference to that index expression even though + * you didn't request it. If it causes a problem, you can instead include an + * index expression like *:* to bypass the issue. + *

    Advantages of using this endpoint before a cross-cluster search

    *

    * You may want to exclude a cluster or index from a search when: *

      @@ -3107,9 +3324,23 @@ public final CompletableFuture reloadSearchAnalyz *
    • A remote cluster is an older version that does not support the feature * you want to use in your search.
    • *
    - * + *

    Test availability of remote clusters

    + *

    + * The remote/info endpoint is commonly used to test whether the + * "local" cluster (the cluster being queried) is connected to its + * remote clusters, but it does not necessarily reflect whether the remote + * cluster is available or not. The remote cluster may be available, while the + * local cluster is not currently connected to it. + *

    + * You can use the _resolve/cluster API to attempt to reconnect to + * remote clusters. For example with GET _resolve/cluster or + * GET _resolve/cluster/*:*. The connected field in + * the response will indicate whether it was successful. If a connection was + * (re-)established, this will also cause the remote/info endpoint + * to now indicate a connected status. + * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster">Documentation * on elastic.co */ @@ -3121,9 +3352,12 @@ public CompletableFuture resolveCluster(ResolveClusterRe } /** - * Resolve the cluster. Resolve the specified index expressions to return - * information about each cluster, including the local cluster, if included. - * Multiple patterns and remote clusters are supported. + * Resolve the cluster. + *

    + * Resolve the specified index expressions to return information about each + * cluster, including the local "querying" cluster, if included. If no + * index expression is provided, the API will return information about all the + * remote clusters that are configured on the querying cluster. *

    * This endpoint is useful before doing a cross-cluster search in order to * determine which remote clusters should be included in a search. @@ -3135,7 +3369,9 @@ public CompletableFuture resolveCluster(ResolveClusterRe * For each cluster in the index expression, information is returned about: *

      *
    • Whether the querying ("local") cluster is currently connected - * to each remote cluster in the index expression scope.
    • + * to each remote cluster specified in the index expression. Note that this + * endpoint actively attempts to contact the remote clusters, unlike the + * remote/info endpoint. *
    • Whether each remote cluster is configured with * skip_unavailable as true or * false.
    • @@ -3154,9 +3390,15 @@ public CompletableFuture resolveCluster(ResolveClusterRe * start with the alias cluster*. Each cluster returns information * about whether it has any indices, aliases or data streams that match * my-index-*. + *

      Note on backwards compatibility

      *

      - * Advantages of using this endpoint before a cross-cluster - * search + * The ability to query without an index expression was added in version 8.18, + * so when querying remote clusters older than that, the local cluster will send + * the index expression dummy* to those remote clusters. Thus, if + * any errors occur, you may see a reference to that index expression even though + * you didn't request it. If it causes a problem, you can instead include an + * index expression like *:* to bypass the issue. + *

      Advantages of using this endpoint before a cross-cluster search

      *

      * You may want to exclude a cluster or index from a search when: *

        @@ -3177,12 +3419,26 @@ public CompletableFuture resolveCluster(ResolveClusterRe *
      • A remote cluster is an older version that does not support the feature * you want to use in your search.
      • *
      - * + *

      Test availability of remote clusters

      + *

      + * The remote/info endpoint is commonly used to test whether the + * "local" cluster (the cluster being queried) is connected to its + * remote clusters, but it does not necessarily reflect whether the remote + * cluster is available or not. The remote cluster may be available, while the + * local cluster is not currently connected to it. + *

      + * You can use the _resolve/cluster API to attempt to reconnect to + * remote clusters. For example with GET _resolve/cluster or + * GET _resolve/cluster/*:*. The connected field in + * the response will indicate whether it was successful. If a connection was + * (re-)established, this will also cause the remote/info endpoint + * to now indicate a connected status. + * * @param fn * a function that initializes a builder to create the * {@link ResolveClusterRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster">Documentation * on elastic.co */ @@ -3191,6 +3447,99 @@ public final CompletableFuture resolveCluster( return resolveCluster(fn.apply(new ResolveClusterRequest.Builder()).build()); } + /** + * Resolve the cluster. + *

      + * Resolve the specified index expressions to return information about each + * cluster, including the local "querying" cluster, if included. If no + * index expression is provided, the API will return information about all the + * remote clusters that are configured on the querying cluster. + *

      + * This endpoint is useful before doing a cross-cluster search in order to + * determine which remote clusters should be included in a search. + *

      + * You use the same index expression with this endpoint as you would for + * cross-cluster search. Index and cluster exclusions are also supported with + * this endpoint. + *

      + * For each cluster in the index expression, information is returned about: + *

        + *
      • Whether the querying ("local") cluster is currently connected + * to each remote cluster specified in the index expression. Note that this + * endpoint actively attempts to contact the remote clusters, unlike the + * remote/info endpoint.
      • + *
      • Whether each remote cluster is configured with + * skip_unavailable as true or + * false.
      • + *
      • Whether there are any indices, aliases, or data streams on that cluster + * that match the index expression.
      • + *
      • Whether the search is likely to have errors returned when you do the + * cross-cluster search (including any authorization errors if you do not have + * permission to query the index).
      • + *
      • Cluster version information, including the Elasticsearch server + * version.
      • + *
      + *

      + * For example, + * GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns + * information about the local cluster and all remotely configured clusters that + * start with the alias cluster*. Each cluster returns information + * about whether it has any indices, aliases or data streams that match + * my-index-*. + *

      Note on backwards compatibility

      + *

      + * The ability to query without an index expression was added in version 8.18, + * so when querying remote clusters older than that, the local cluster will send + * the index expression dummy* to those remote clusters. Thus, if + * any errors occur, you may see a reference to that index expression even though + * you didn't request it. If it causes a problem, you can instead include an + * index expression like *:* to bypass the issue. + *

      Advantages of using this endpoint before a cross-cluster search

      + *

      + * You may want to exclude a cluster or index from a search when: + *

        + *
      • A remote cluster is not currently connected and is configured with + * skip_unavailable=false. Running a cross-cluster search under + * those conditions will cause the entire search to fail.
      • + *
      • A cluster has no matching indices, aliases or data streams for the index + * expression (or your user does not have permissions to search them). For + * example, suppose your index expression is logs*,remote1:logs* + * and the remote1 cluster has no indices, aliases or data streams that match + * logs*. In that case, that cluster will return no results from + * that cluster if you include it in a cross-cluster search.
      • + *
      • The index expression (combined with any query parameters you specify) + * will likely cause an exception to be thrown when you do the search. In these + * cases, the "error" field in the _resolve/cluster + * response will be present. (This is also where security/permission errors will + * be shown.)
      • + *
      • A remote cluster is an older version that does not support the feature + * you want to use in your search.
      • + *
      + *

      Test availability of remote clusters

      + *

      + * The remote/info endpoint is commonly used to test whether the + * "local" cluster (the cluster being queried) is connected to its + * remote clusters, but it does not necessarily reflect whether the remote + * cluster is available or not. The remote cluster may be available, while the + * local cluster is not currently connected to it. + *

      + * You can use the _resolve/cluster API to attempt to reconnect to + * remote clusters. For example with GET _resolve/cluster or + * GET _resolve/cluster/*:*. The connected field in + * the response will indicate whether it was successful. If a connection was + * (re-)established, this will also cause the remote/info endpoint + * to now indicate a connected status. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture resolveCluster() { + return this.transport.performRequestAsync(new ResolveClusterRequest.Builder().build(), + ResolveClusterRequest._ENDPOINT, this.transportOptions); + } + // ----- Endpoint: indices.resolve_index /** @@ -3199,7 +3548,7 @@ public final CompletableFuture resolveCluster( * supported. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index">Documentation * on elastic.co */ @@ -3219,7 +3568,7 @@ public CompletableFuture resolveIndex(ResolveIndexRequest * a function that initializes a builder to create the * {@link ResolveIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index">Documentation * on elastic.co */ @@ -3284,7 +3633,7 @@ public final CompletableFuture resolveIndex( * 2099, the new index's name is my-index-2099.05.07-000002. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover">Documentation * on elastic.co */ @@ -3352,7 +3701,7 @@ public CompletableFuture rollover(RolloverRequest request) { * a function that initializes a builder to create the * {@link RolloverRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover">Documentation * on elastic.co */ @@ -3369,7 +3718,7 @@ public final CompletableFuture rollover( * stream's backing indices. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments">Documentation * on elastic.co */ @@ -3389,7 +3738,7 @@ public CompletableFuture segments(SegmentsRequest request) { * a function that initializes a builder to create the * {@link SegmentsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments">Documentation * on elastic.co */ @@ -3404,7 +3753,7 @@ public final CompletableFuture segments( * stream's backing indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments">Documentation * on elastic.co */ @@ -3433,7 +3782,7 @@ public CompletableFuture segments() { * are unassigned or have one or more unassigned replica shards. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores">Documentation * on elastic.co */ @@ -3465,7 +3814,7 @@ public CompletableFuture shardStores(ShardStoresRequest req * a function that initializes a builder to create the * {@link ShardStoresRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores">Documentation * on elastic.co */ @@ -3492,7 +3841,7 @@ public final CompletableFuture shardStores( * are unassigned or have one or more unassigned replica shards. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores">Documentation * on elastic.co */ @@ -3561,7 +3910,7 @@ public CompletableFuture shardStores() { *

    * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink">Documentation * on elastic.co */ @@ -3633,7 +3982,7 @@ public CompletableFuture shrink(ShrinkRequest request) { * a function that initializes a builder to create the * {@link ShrinkRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink">Documentation * on elastic.co */ @@ -3649,7 +3998,7 @@ public final CompletableFuture shrink( * specified index from an existing index template. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template">Documentation * on elastic.co */ @@ -3669,7 +4018,7 @@ public CompletableFuture simulateIndexTemplate( * a function that initializes a builder to create the * {@link SimulateIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template">Documentation * on elastic.co */ @@ -3685,7 +4034,7 @@ public final CompletableFuture simulateIndexTempl * by a particular index template. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template">Documentation * on elastic.co */ @@ -3704,7 +4053,7 @@ public CompletableFuture simulateTemplate(SimulateTemp * a function that initializes a builder to create the * {@link SimulateTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template">Documentation * on elastic.co */ @@ -3718,7 +4067,7 @@ public final CompletableFuture simulateTemplate( * by a particular index template. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template">Documentation * on elastic.co */ @@ -3795,7 +4144,7 @@ public CompletableFuture simulateTemplate() { *
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split">Documentation * on elastic.co */ @@ -3875,7 +4224,7 @@ public CompletableFuture split(SplitRequest request) { * a function that initializes a builder to create the * {@link SplitRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split">Documentation * on elastic.co */ @@ -3904,7 +4253,7 @@ public final CompletableFuture split( * any node-level statistics to which the shard contributed. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats">Documentation * on elastic.co */ @@ -3936,7 +4285,7 @@ public CompletableFuture stats(IndicesStatsRequest request * a function that initializes a builder to create the * {@link IndicesStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats">Documentation * on elastic.co */ @@ -3963,7 +4312,7 @@ public final CompletableFuture stats( * any node-level statistics to which the shard contributed. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats">Documentation * on elastic.co */ @@ -3972,41 +4321,6 @@ public CompletableFuture stats() { IndicesStatsRequest._ENDPOINT, this.transportOptions); } - // ----- Endpoint: indices.unfreeze - - /** - * Unfreeze an index. When a frozen index is unfrozen, the index goes through - * the normal recovery process and becomes writeable again. - * - * @see Documentation - * on elastic.co - */ - - public CompletableFuture unfreeze(UnfreezeRequest request) { - @SuppressWarnings("unchecked") - JsonEndpoint endpoint = (JsonEndpoint) UnfreezeRequest._ENDPOINT; - - return this.transport.performRequestAsync(request, endpoint, this.transportOptions); - } - - /** - * Unfreeze an index. 
When a frozen index is unfrozen, the index goes through - * the normal recovery process and becomes writeable again. - * - * @param fn - * a function that initializes a builder to create the - * {@link UnfreezeRequest} - * @see Documentation - * on elastic.co - */ - - public final CompletableFuture unfreeze( - Function> fn) { - return unfreeze(fn.apply(new UnfreezeRequest.Builder()).build()); - } - // ----- Endpoint: indices.update_aliases /** diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java index 884c23eb7..7cda09cad 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java @@ -69,11 +69,13 @@ public ElasticsearchIndicesClient withTransportOptions(@Nullable TransportOption // ----- Endpoint: indices.add_block /** - * Add an index block. Limits the operations allowed on an index by blocking - * specific operation types. + * Add an index block. + *

+ * Add an index block to an index. Index blocks limit the operations allowed on + * an index by blocking specific operation types. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block">Documentation * on elastic.co */ @@ -85,14 +87,16 @@ public AddBlockResponse addBlock(AddBlockRequest request) throws IOException, El } /** - * Add an index block. Limits the operations allowed on an index by blocking - * specific operation types. + * Add an index block. + *

+ * Add an index block to an index. Index blocks limit the operations allowed on + * an index by blocking specific operation types. * * @param fn * a function that initializes a builder to create the * {@link AddBlockRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block">Documentation * on elastic.co */ @@ -114,7 +118,7 @@ public final AddBlockResponse addBlock(Function10000 as its limit. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze">Documentation * on elastic.co */ @@ -139,7 +143,7 @@ public AnalyzeResponse analyze(AnalyzeRequest request) throws IOException, Elast * a function that initializes a builder to create the * {@link AnalyzeRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze">Documentation * on elastic.co */ @@ -159,7 +163,7 @@ public final AnalyzeResponse analyze(Function10000 as its limit. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze">Documentation * on elastic.co */ @@ -168,6 +172,45 @@ public AnalyzeResponse analyze() throws IOException, ElasticsearchException { this.transportOptions); } + // ----- Endpoint: indices.cancel_migrate_reindex + + /** + * Cancel a migration reindex operation. + *

+ * Cancel a migration reindex attempt for a data stream or index. + * + * @see Documentation + * on elastic.co + */ + + public CancelMigrateReindexResponse cancelMigrateReindex(CancelMigrateReindexRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) CancelMigrateReindexRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Cancel a migration reindex operation. + *

+ * Cancel a migration reindex attempt for a data stream or index. + * + * @param fn + * a function that initializes a builder to create the + * {@link CancelMigrateReindexRequest} + * @see Documentation + * on elastic.co + */ + + public final CancelMigrateReindexResponse cancelMigrateReindex( + Function> fn) + throws IOException, ElasticsearchException { + return cancelMigrateReindex(fn.apply(new CancelMigrateReindexRequest.Builder()).build()); + } + // ----- Endpoint: indices.clear_cache /** @@ -180,7 +223,7 @@ public AnalyzeResponse analyze() throws IOException, ElasticsearchException { * use the fields parameter. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache">Documentation * on elastic.co */ @@ -204,7 +247,7 @@ public ClearCacheResponse clearCache(ClearCacheRequest request) throws IOExcepti * a function that initializes a builder to create the * {@link ClearCacheRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache">Documentation * on elastic.co */ @@ -223,7 +266,7 @@ public final ClearCacheResponse clearCache(Functionfields parameter. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache">Documentation * on elastic.co */ @@ -304,7 +347,7 @@ public ClearCacheResponse clearCache() throws IOException, ElasticsearchExceptio * action as well. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone">Documentation * on elastic.co */ @@ -388,7 +431,7 @@ public CloneIndexResponse clone(CloneIndexRequest request) throws IOException, E * a function that initializes a builder to create the * {@link CloneIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone">Documentation * on elastic.co */ @@ -429,7 +472,7 @@ public final CloneIndexResponse clone(Functionfalse. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close">Documentation * on elastic.co */ @@ -473,7 +516,7 @@ public CloseIndexResponse close(CloseIndexRequest request) throws IOException, E * a function that initializes a builder to create the * {@link CloseIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close">Documentation * on elastic.co */ @@ -519,7 +562,7 @@ public final CloseIndexResponse close(Functionwait_for_active_shards value on all subsequent write operations. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create">Documentation * on elastic.co */ @@ -568,7 +611,7 @@ public CreateIndexResponse create(CreateIndexRequest request) throws IOException * a function that initializes a builder to create the * {@link CreateIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create">Documentation * on elastic.co */ @@ -580,11 +623,12 @@ public final CreateIndexResponse create(Function + * You must have a matching index template with data stream enabled. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream">Documentation * on elastic.co */ @@ -597,14 +641,15 @@ public CreateDataStreamResponse createDataStream(CreateDataStreamRequest request } /** - * Create a data stream. Creates a data stream. You must have a matching index - * template with data stream enabled. + * Create a data stream. + *

+ * You must have a matching index template with data stream enabled. * * @param fn * a function that initializes a builder to create the * {@link CreateDataStreamRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream">Documentation * on elastic.co */ @@ -614,13 +659,54 @@ public final CreateDataStreamResponse createDataStream( return createDataStream(fn.apply(new CreateDataStreamRequest.Builder()).build()); } + // ----- Endpoint: indices.create_from + + /** + * Create an index from a source index. + *

+ * Copy the mappings and settings from the source index to a destination index + * while allowing request settings and mappings to override the source values. + * + * @see Documentation + * on elastic.co + */ + + public CreateFromResponse createFrom(CreateFromRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) CreateFromRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Create an index from a source index. + *

+ * Copy the mappings and settings from the source index to a destination index + * while allowing request settings and mappings to override the source values. + * + * @param fn + * a function that initializes a builder to create the + * {@link CreateFromRequest} + * @see Documentation + * on elastic.co + */ + + public final CreateFromResponse createFrom(Function> fn) + throws IOException, ElasticsearchException { + return createFrom(fn.apply(new CreateFromRequest.Builder()).build()); + } + // ----- Endpoint: indices.data_streams_stats /** - * Get data stream stats. Retrieves statistics for one or more data streams. + * Get data stream stats. + *

+ * Get statistics for one or more data streams. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1">Documentation * on elastic.co */ @@ -633,13 +719,15 @@ public DataStreamsStatsResponse dataStreamsStats(DataStreamsStatsRequest request } /** - * Get data stream stats. Retrieves statistics for one or more data streams. + * Get data stream stats. + *

+ * Get statistics for one or more data streams. * * @param fn * a function that initializes a builder to create the * {@link DataStreamsStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1">Documentation * on elastic.co */ @@ -650,10 +738,12 @@ public final DataStreamsStatsResponse dataStreamsStats( } /** - * Get data stream stats. Retrieves statistics for one or more data streams. + * Get data stream stats. + *

+ * Get statistics for one or more data streams. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1">Documentation * on elastic.co */ @@ -674,7 +764,7 @@ public DataStreamsStatsResponse dataStreamsStats() throws IOException, Elasticse * You can then use the delete index API to delete the previous write index. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete">Documentation * on elastic.co */ @@ -698,7 +788,7 @@ public DeleteIndexResponse delete(DeleteIndexRequest request) throws IOException * a function that initializes a builder to create the * {@link DeleteIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete">Documentation * on elastic.co */ @@ -713,7 +803,7 @@ public final DeleteIndexResponse delete(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias">Documentation * on elastic.co */ @@ -731,7 +821,7 @@ public DeleteAliasResponse deleteAlias(DeleteAliasRequest request) throws IOExce * a function that initializes a builder to create the * {@link DeleteAliasRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias">Documentation * on elastic.co */ @@ -748,7 +838,7 @@ public final DeleteAliasResponse deleteAlias( * stream, rendering it not managed by the data stream lifecycle. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle">Documentation * on elastic.co */ @@ -768,7 +858,7 @@ public DeleteDataLifecycleResponse deleteDataLifecycle(DeleteDataLifecycleReques * a function that initializes a builder to create the * {@link DeleteDataLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle">Documentation * on elastic.co */ @@ -785,7 +875,7 @@ public final DeleteDataLifecycleResponse deleteDataLifecycle( * indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream">Documentation * on elastic.co */ @@ -805,7 +895,7 @@ public DeleteDataStreamResponse deleteDataStream(DeleteDataStreamRequest request * a function that initializes a builder to create the * {@link DeleteDataStreamRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream">Documentation * on elastic.co */ @@ -824,7 +914,7 @@ public final DeleteDataStreamResponse deleteDataStream( * match completely with existing templates. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template">Documentation * on elastic.co */ @@ -846,7 +936,7 @@ public DeleteIndexTemplateResponse deleteIndexTemplate(DeleteIndexTemplateReques * a function that initializes a builder to create the * {@link DeleteIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template">Documentation * on elastic.co */ @@ -862,7 +952,7 @@ public final DeleteIndexTemplateResponse deleteIndexTemplate( * Delete a legacy index template. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template">Documentation * on elastic.co */ @@ -881,7 +971,7 @@ public DeleteTemplateResponse deleteTemplate(DeleteTemplateRequest request) * a function that initializes a builder to create the * {@link DeleteTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template">Documentation * on elastic.co */ @@ -908,7 +998,7 @@ public final DeleteTemplateResponse deleteTemplate( * underestimated while the _source field is overestimated. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage">Documentation * on elastic.co */ @@ -937,7 +1027,7 @@ public DiskUsageResponse diskUsage(DiskUsageRequest request) throws IOException, * a function that initializes a builder to create the * {@link DiskUsageRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage">Documentation * on elastic.co */ @@ -962,7 +1052,7 @@ public final DiskUsageResponse diskUsage(Functionindex.blocks.write: true). 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample">Documentation * on elastic.co */ @@ -990,7 +1080,7 @@ public DownsampleResponse downsample(DownsampleRequest request) throws IOExcepti * a function that initializes a builder to create the * {@link DownsampleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample">Documentation * on elastic.co */ @@ -1006,7 +1096,7 @@ public final DownsampleResponse downsample(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists">Documentation * on elastic.co */ @@ -1025,7 +1115,7 @@ public BooleanResponse exists(ExistsRequest request) throws IOException, Elastic * a function that initializes a builder to create the * {@link ExistsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists">Documentation * on elastic.co */ @@ -1037,10 +1127,12 @@ public final BooleanResponse exists(Function + * Check if one or more data stream or index aliases exist. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias">Documentation * on elastic.co */ @@ -1052,13 +1144,15 @@ public BooleanResponse existsAlias(ExistsAliasRequest request) throws IOExceptio } /** - * Check aliases. Checks if one or more data stream or index aliases exist. + * Check aliases. + *

+ * Check if one or more data stream or index aliases exist. * * @param fn * a function that initializes a builder to create the * {@link ExistsAliasRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias">Documentation * on elastic.co */ @@ -1070,10 +1164,12 @@ public final BooleanResponse existsAlias(Function + * Check whether index templates exist. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template">Documentation * on elastic.co */ @@ -1086,13 +1182,15 @@ public BooleanResponse existsIndexTemplate(ExistsIndexTemplateRequest request) } /** - * Check index templates. Check whether index templates exist. + * Check index templates. + *

+ * Check whether index templates exist. * * @param fn * a function that initializes a builder to create the * {@link ExistsIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template">Documentation * on elastic.co */ @@ -1114,7 +1212,7 @@ public final BooleanResponse existsIndexTemplate( * Elasticsearch 7.8. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template">Documentation * on elastic.co */ @@ -1138,7 +1236,7 @@ public BooleanResponse existsTemplate(ExistsTemplateRequest request) throws IOEx * a function that initializes a builder to create the * {@link ExistsTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template">Documentation * on elastic.co */ @@ -1157,7 +1255,7 @@ public final BooleanResponse existsTemplate( * index, or any errors encountered during lifecycle execution. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle">Documentation * on elastic.co */ @@ -1179,7 +1277,7 @@ public ExplainDataLifecycleResponse explainDataLifecycle(ExplainDataLifecycleReq * a function that initializes a builder to create the * {@link ExplainDataLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle">Documentation * on elastic.co */ @@ -1204,7 +1302,7 @@ public final ExplainDataLifecycleResponse explainDataLifecycle( * times. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats">Documentation * on elastic.co */ @@ -1232,7 +1330,7 @@ public FieldUsageStatsResponse fieldUsageStats(FieldUsageStatsRequest request) * a function that initializes a builder to create the * {@link FieldUsageStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats">Documentation * on elastic.co */ @@ -1267,7 +1365,7 @@ public final FieldUsageStatsResponse fieldUsageStats( * before the flush API was called. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush">Documentation * on elastic.co */ @@ -1304,7 +1402,7 @@ public FlushResponse flush(FlushRequest request) throws IOException, Elasticsear * a function that initializes a builder to create the * {@link FlushRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush">Documentation * on elastic.co */ @@ -1336,7 +1434,7 @@ public final FlushResponse flush(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush">Documentation * on elastic.co */ @@ -1423,7 +1521,7 @@ public FlushResponse flush() throws IOException, ElasticsearchException { * * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge">Documentation * on elastic.co */ @@ -1513,7 +1611,7 @@ public ForcemergeResponse forcemerge(ForcemergeRequest request) throws IOExcepti * a function that initializes a builder to create the * {@link ForcemergeRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge">Documentation * on elastic.co */ @@ -1598,7 +1696,7 @@ public final ForcemergeResponse forcemerge(Function * * @see Documentation + * 
"https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge">Documentation * on elastic.co */ @@ -1614,7 +1712,7 @@ public ForcemergeResponse forcemerge() throws IOException, ElasticsearchExceptio * streams, the API returns information about the stream’s backing indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get">Documentation * on elastic.co */ @@ -1633,7 +1731,7 @@ public GetIndexResponse get(GetIndexRequest request) throws IOException, Elastic * a function that initializes a builder to create the * {@link GetIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get">Documentation * on elastic.co */ @@ -1648,7 +1746,9 @@ public final GetIndexResponse get(FunctionDocumentation on elastic.co + * @see Documentation + * on elastic.co */ public GetAliasResponse getAlias(GetAliasRequest request) throws IOException, ElasticsearchException { @@ -1665,7 +1765,9 @@ public GetAliasResponse getAlias(GetAliasRequest request) throws IOException, El * @param fn * a function that initializes a builder to create the * {@link GetAliasRequest} - * @see Documentation on elastic.co + * @see Documentation + * on elastic.co */ public final GetAliasResponse getAlias(Function> fn) @@ -1677,7 +1779,9 @@ public final GetAliasResponse getAlias(FunctionDocumentation on elastic.co + * @see Documentation + * on elastic.co */ public GetAliasResponse getAlias() throws IOException, ElasticsearchException { @@ -1688,11 +1792,12 @@ public GetAliasResponse getAlias() throws IOException, ElasticsearchException { // ----- Endpoint: indices.get_data_lifecycle /** - * Get data stream lifecycles. Retrieves the data stream lifecycle configuration - * of one or more data streams. + * Get data stream lifecycles. + *

+ * Get the data stream lifecycle configuration of one or more data streams. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle">Documentation * on elastic.co */ @@ -1705,14 +1810,15 @@ public GetDataLifecycleResponse getDataLifecycle(GetDataLifecycleRequest request } /** - * Get data stream lifecycles. Retrieves the data stream lifecycle configuration - * of one or more data streams. + * Get data stream lifecycles. + *

+ * Get the data stream lifecycle configuration of one or more data streams. * * @param fn * a function that initializes a builder to create the * {@link GetDataLifecycleRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle">Documentation * on elastic.co */ @@ -1729,7 +1835,7 @@ public final GetDataLifecycleResponse getDataLifecycle( * are managed by a data stream lifecycle. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats">Documentation * on elastic.co */ public GetDataLifecycleStatsResponse getDataLifecycleStats() throws IOException, ElasticsearchException { @@ -1740,10 +1846,12 @@ public GetDataLifecycleStatsResponse getDataLifecycleStats() throws IOException, // ----- Endpoint: indices.get_data_stream /** - * Get data streams. Retrieves information about one or more data streams. + * Get data streams. + *

+ * Get information about one or more data streams. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream">Documentation * on elastic.co */ @@ -1756,13 +1864,15 @@ public GetDataStreamResponse getDataStream(GetDataStreamRequest request) } /** - * Get data streams. Retrieves information about one or more data streams. + * Get data streams. + *

+ * Get information about one or more data streams. * * @param fn * a function that initializes a builder to create the * {@link GetDataStreamRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream">Documentation * on elastic.co */ @@ -1773,10 +1883,12 @@ public final GetDataStreamResponse getDataStream( } /** - * Get data streams. Retrieves information about one or more data streams. + * Get data streams. + *

+ * Get information about one or more data streams. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream">Documentation * on elastic.co */ @@ -1796,7 +1908,7 @@ public GetDataStreamResponse getDataStream() throws IOException, ElasticsearchEx * mapping contains a large number of fields. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping">Documentation * on elastic.co */ @@ -1820,7 +1932,7 @@ public GetFieldMappingResponse getFieldMapping(GetFieldMappingRequest request) * a function that initializes a builder to create the * {@link GetFieldMappingRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping">Documentation * on elastic.co */ @@ -1836,7 +1948,7 @@ public final GetFieldMappingResponse getFieldMapping( * Get index templates. Get information about one or more index templates. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template">Documentation * on elastic.co */ @@ -1855,7 +1967,7 @@ public GetIndexTemplateResponse getIndexTemplate(GetIndexTemplateRequest request * a function that initializes a builder to create the * {@link GetIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template">Documentation * on elastic.co */ @@ -1869,7 +1981,7 @@ public final GetIndexTemplateResponse getIndexTemplate( * Get index templates. Get information about one or more index templates. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template">Documentation * on elastic.co */ @@ -1885,7 +1997,7 @@ public GetIndexTemplateResponse getIndexTemplate() throws IOException, Elasticse * stream’s backing indices. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping">Documentation * on elastic.co */ @@ -1904,7 +2016,7 @@ public GetMappingResponse getMapping(GetMappingRequest request) throws IOExcepti * a function that initializes a builder to create the * {@link GetMappingRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping">Documentation * on elastic.co */ @@ -1918,7 +2030,7 @@ public final GetMappingResponse getMapping(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping">Documentation * on elastic.co */ @@ -1927,6 +2039,45 @@ public GetMappingResponse getMapping() throws IOException, ElasticsearchExceptio this.transportOptions); } + // ----- Endpoint: indices.get_migrate_reindex_status + + /** + * Get the migration reindexing status. + *

+ * Get the status of a migration reindex attempt for a data stream or index. + * + * @see Documentation + * on elastic.co + */ + + public GetMigrateReindexStatusResponse getMigrateReindexStatus(GetMigrateReindexStatusRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) GetMigrateReindexStatusRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Get the migration reindexing status. + *

+ * Get the status of a migration reindex attempt for a data stream or index. + * + * @param fn + * a function that initializes a builder to create the + * {@link GetMigrateReindexStatusRequest} + * @see Documentation + * on elastic.co + */ + + public final GetMigrateReindexStatusResponse getMigrateReindexStatus( + Function> fn) + throws IOException, ElasticsearchException { + return getMigrateReindexStatus(fn.apply(new GetMigrateReindexStatusRequest.Builder()).build()); + } + // ----- Endpoint: indices.get_settings /** @@ -1934,7 +2085,7 @@ public GetMappingResponse getMapping() throws IOException, ElasticsearchExceptio * streams, it returns setting information for the stream's backing indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings">Documentation * on elastic.co */ @@ -1954,7 +2105,7 @@ public GetIndicesSettingsResponse getSettings(GetIndicesSettingsRequest request) * a function that initializes a builder to create the * {@link GetIndicesSettingsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings">Documentation * on elastic.co */ @@ -1969,7 +2120,7 @@ public final GetIndicesSettingsResponse getSettings( * streams, it returns setting information for the stream's backing indices. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings">Documentation * on elastic.co */ @@ -1988,7 +2139,7 @@ public GetIndicesSettingsResponse getSettings() throws IOException, Elasticsearc * Elasticsearch 7.8. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template">Documentation * on elastic.co */ @@ -2010,7 +2161,7 @@ public GetTemplateResponse getTemplate(GetTemplateRequest request) throws IOExce * a function that initializes a builder to create the * {@link GetTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template">Documentation * on elastic.co */ @@ -2028,7 +2179,7 @@ public final GetTemplateResponse getTemplate( * Elasticsearch 7.8. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template">Documentation * on elastic.co */ @@ -2037,6 +2188,66 @@ public GetTemplateResponse getTemplate() throws IOException, ElasticsearchExcept this.transportOptions); } + // ----- Endpoint: indices.migrate_reindex + + /** + * Reindex legacy backing indices. + *

+ * Reindex all legacy backing indices for a data stream. This operation occurs + * in a persistent task. The persistent task ID is returned immediately and the + * reindexing work is completed in that task. + * + * @see Documentation + * on elastic.co + */ + + public MigrateReindexResponse migrateReindex(MigrateReindexRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) MigrateReindexRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Reindex legacy backing indices. + *

+ * Reindex all legacy backing indices for a data stream. This operation occurs + * in a persistent task. The persistent task ID is returned immediately and the + * reindexing work is completed in that task. + * + * @param fn + * a function that initializes a builder to create the + * {@link MigrateReindexRequest} + * @see Documentation + * on elastic.co + */ + + public final MigrateReindexResponse migrateReindex( + Function> fn) + throws IOException, ElasticsearchException { + return migrateReindex(fn.apply(new MigrateReindexRequest.Builder()).build()); + } + + /** + * Reindex legacy backing indices. + *

+ * Reindex all legacy backing indices for a data stream. This operation occurs + * in a persistent task. The persistent task ID is returned immediately and the + * reindexing work is completed in that task. + * + * @see Documentation + * on elastic.co + */ + + public MigrateReindexResponse migrateReindex() throws IOException, ElasticsearchException { + return this.transport.performRequest(new MigrateReindexRequest.Builder().build(), + MigrateReindexRequest._ENDPOINT, this.transportOptions); + } + // ----- Endpoint: indices.migrate_to_data_stream /** @@ -2163,7 +2374,7 @@ public final ModifyDataStreamResponse modifyDataStream( * _open and _close index actions as well. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open">Documentation * on elastic.co */ @@ -2213,7 +2424,7 @@ public OpenResponse open(OpenRequest request) throws IOException, ElasticsearchE * a function that initializes a builder to create the * {@link OpenRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open">Documentation * on elastic.co */ @@ -2512,7 +2723,7 @@ public final PutIndexTemplateResponse putIndexTemplate( * name. Instead, add an alias field to create an alternate field name. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping">Documentation * on elastic.co */ @@ -2564,7 +2775,7 @@ public PutMappingResponse putMapping(PutMappingRequest request) throws IOExcepti * a function that initializes a builder to create the * {@link PutMappingRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping">Documentation * on elastic.co */ @@ -2597,7 +2808,7 @@ public final PutMappingResponse putMapping(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings">Documentation * on elastic.co */ @@ -2634,7 +2845,7 @@ public PutIndicesSettingsResponse putSettings(PutIndicesSettingsRequest request) * a function that initializes a builder to create the * {@link PutIndicesSettingsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings">Documentation * on elastic.co */ @@ -2666,7 +2877,7 @@ public final PutIndicesSettingsResponse putSettings( * indices, you must create a new data stream and reindex your data into it. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings">Documentation * on elastic.co */ @@ -2710,7 +2921,7 @@ public PutIndicesSettingsResponse putSettings() throws IOException, Elasticsearc * non-deterministic merging order. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template">Documentation * on elastic.co */ @@ -2757,7 +2968,7 @@ public PutTemplateResponse putTemplate(PutTemplateRequest request) throws IOExce * a function that initializes a builder to create the * {@link PutTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template">Documentation * on elastic.co */ @@ -2806,7 +3017,7 @@ public final PutTemplateResponse putTemplate( * the recovery API. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery">Documentation * on elastic.co */ @@ -2857,7 +3068,7 @@ public RecoveryResponse recovery(RecoveryRequest request) throws IOException, El * a function that initializes a builder to create the * {@link RecoveryRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery">Documentation * on elastic.co */ @@ -2903,7 +3114,7 @@ public final RecoveryResponse recovery(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery">Documentation * on elastic.co */ @@ -2937,7 +3148,7 @@ public RecoveryResponse recovery() throws IOException, ElasticsearchException { * indexing operation waits for a periodic refresh before running the search. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh">Documentation * on elastic.co */ @@ -2974,7 +3185,7 @@ public RefreshResponse refresh(RefreshRequest request) throws IOException, Elast * a function that initializes a builder to create the * {@link RefreshRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh">Documentation * on elastic.co */ @@ -3006,7 +3217,7 @@ public final RefreshResponse refresh(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh">Documentation * on elastic.co */ @@ -3042,7 +3253,7 @@ public RefreshResponse refresh() throws IOException, ElasticsearchException { * in case shards are relocated in the future. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers">Documentation * on elastic.co */ @@ -3082,7 +3293,7 @@ public ReloadSearchAnalyzersResponse reloadSearchAnalyzers(ReloadSearchAnalyzers * a function that initializes a builder to create the * {@link ReloadSearchAnalyzersRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers">Documentation * on elastic.co */ @@ -3095,9 +3306,12 @@ public final ReloadSearchAnalyzersResponse reloadSearchAnalyzers( // ----- Endpoint: indices.resolve_cluster /** - * Resolve the cluster. Resolve the specified index expressions to return - * information about each cluster, including the local cluster, if included. - * Multiple patterns and remote clusters are supported. + * Resolve the cluster. + *

+ * Resolve the specified index expressions to return information about each + * cluster, including the local "querying" cluster, if included. If no + * index expression is provided, the API will return information about all the + * remote clusters that are configured on the querying cluster. *

* This endpoint is useful before doing a cross-cluster search in order to * determine which remote clusters should be included in a search. @@ -3109,7 +3323,9 @@ public final ReloadSearchAnalyzersResponse reloadSearchAnalyzers( * For each cluster in the index expression, information is returned about: *

    *
  • Whether the querying ("local") cluster is currently connected - * to each remote cluster in the index expression scope.
  • + * to each remote cluster specified in the index expression. Note that this + * endpoint actively attempts to contact the remote clusters, unlike the + * remote/info endpoint. *
  • Whether each remote cluster is configured with * skip_unavailable as true or * false.
  • @@ -3128,9 +3344,15 @@ public final ReloadSearchAnalyzersResponse reloadSearchAnalyzers( * start with the alias cluster*. Each cluster returns information * about whether it has any indices, aliases or data streams that match * my-index-*. + *

    Note on backwards compatibility

    *

    - * Advantages of using this endpoint before a cross-cluster - * search + * The ability to query without an index expression was added in version 8.18, + * so when querying remote clusters older than that, the local cluster will send + * the index expression dummy* to those remote clusters. Thus, if + * an errors occur, you may see a reference to that index expression even though + * you didn't request it. If it causes a problem, you can instead include an + * index expression like *:* to bypass the issue. + *

    Advantages of using this endpoint before a cross-cluster search

    *

    * You may want to exclude a cluster or index from a search when: *

      @@ -3151,9 +3373,23 @@ public final ReloadSearchAnalyzersResponse reloadSearchAnalyzers( *
    • A remote cluster is an older version that does not support the feature * you want to use in your search.
    • *
    - * + *

    Test availability of remote clusters

    + *

    + * The remote/info endpoint is commonly used to test whether the + * "local" cluster (the cluster being queried) is connected to its + * remote clusters, but it does not necessarily reflect whether the remote + * cluster is available or not. The remote cluster may be available, while the + * local cluster is not currently connected to it. + *

    + * You can use the _resolve/cluster API to attempt to reconnect to + * remote clusters. For example with GET _resolve/cluster or + * GET _resolve/cluster/*:*. The connected field in + * the response will indicate whether it was successful. If a connection was + * (re-)established, this will also cause the remote/info endpoint + * to now indicate a connected status. + * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster">Documentation * on elastic.co */ @@ -3166,9 +3402,12 @@ public ResolveClusterResponse resolveCluster(ResolveClusterRequest request) } /** - * Resolve the cluster. Resolve the specified index expressions to return - * information about each cluster, including the local cluster, if included. - * Multiple patterns and remote clusters are supported. + * Resolve the cluster. + *

    + * Resolve the specified index expressions to return information about each + * cluster, including the local "querying" cluster, if included. If no + * index expression is provided, the API will return information about all the + * remote clusters that are configured on the querying cluster. *

    * This endpoint is useful before doing a cross-cluster search in order to * determine which remote clusters should be included in a search. @@ -3180,7 +3419,9 @@ public ResolveClusterResponse resolveCluster(ResolveClusterRequest request) * For each cluster in the index expression, information is returned about: *

      *
    • Whether the querying ("local") cluster is currently connected - * to each remote cluster in the index expression scope.
    • + * to each remote cluster specified in the index expression. Note that this + * endpoint actively attempts to contact the remote clusters, unlike the + * remote/info endpoint. *
    • Whether each remote cluster is configured with * skip_unavailable as true or * false.
    • @@ -3199,9 +3440,15 @@ public ResolveClusterResponse resolveCluster(ResolveClusterRequest request) * start with the alias cluster*. Each cluster returns information * about whether it has any indices, aliases or data streams that match * my-index-*. + *

      Note on backwards compatibility

      *

      - * Advantages of using this endpoint before a cross-cluster - * search + * The ability to query without an index expression was added in version 8.18, + * so when querying remote clusters older than that, the local cluster will send + * the index expression dummy* to those remote clusters. Thus, if + * an errors occur, you may see a reference to that index expression even though + * you didn't request it. If it causes a problem, you can instead include an + * index expression like *:* to bypass the issue. + *

      Advantages of using this endpoint before a cross-cluster search

      *

      * You may want to exclude a cluster or index from a search when: *

        @@ -3222,12 +3469,26 @@ public ResolveClusterResponse resolveCluster(ResolveClusterRequest request) *
      • A remote cluster is an older version that does not support the feature * you want to use in your search.
      • *
      - * + *

      Test availability of remote clusters

      + *

      + * The remote/info endpoint is commonly used to test whether the + * "local" cluster (the cluster being queried) is connected to its + * remote clusters, but it does not necessarily reflect whether the remote + * cluster is available or not. The remote cluster may be available, while the + * local cluster is not currently connected to it. + *

      + * You can use the _resolve/cluster API to attempt to reconnect to + * remote clusters. For example with GET _resolve/cluster or + * GET _resolve/cluster/*:*. The connected field in + * the response will indicate whether it was successful. If a connection was + * (re-)established, this will also cause the remote/info endpoint + * to now indicate a connected status. + * * @param fn * a function that initializes a builder to create the * {@link ResolveClusterRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster">Documentation * on elastic.co */ @@ -3237,6 +3498,99 @@ public final ResolveClusterResponse resolveCluster( return resolveCluster(fn.apply(new ResolveClusterRequest.Builder()).build()); } + /** + * Resolve the cluster. + *

      + * Resolve the specified index expressions to return information about each + * cluster, including the local "querying" cluster, if included. If no + * index expression is provided, the API will return information about all the + * remote clusters that are configured on the querying cluster. + *

      + * This endpoint is useful before doing a cross-cluster search in order to + * determine which remote clusters should be included in a search. + *

      + * You use the same index expression with this endpoint as you would for + * cross-cluster search. Index and cluster exclusions are also supported with + * this endpoint. + *

      + * For each cluster in the index expression, information is returned about: + *

        + *
      • Whether the querying ("local") cluster is currently connected + * to each remote cluster specified in the index expression. Note that this + * endpoint actively attempts to contact the remote clusters, unlike the + * remote/info endpoint.
      • + *
      • Whether each remote cluster is configured with + * skip_unavailable as true or + * false.
      • + *
      • Whether there are any indices, aliases, or data streams on that cluster + * that match the index expression.
      • + *
      • Whether the search is likely to have errors returned when you do the + * cross-cluster search (including any authorization errors if you do not have + * permission to query the index).
      • + *
      • Cluster version information, including the Elasticsearch server + * version.
      • + *
      + *

      + * For example, + * GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns + * information about the local cluster and all remotely configured clusters that + * start with the alias cluster*. Each cluster returns information + * about whether it has any indices, aliases or data streams that match + * my-index-*. + *

      Note on backwards compatibility

      + *

      + * The ability to query without an index expression was added in version 8.18, + * so when querying remote clusters older than that, the local cluster will send + * the index expression dummy* to those remote clusters. Thus, if + * an errors occur, you may see a reference to that index expression even though + * you didn't request it. If it causes a problem, you can instead include an + * index expression like *:* to bypass the issue. + *

      Advantages of using this endpoint before a cross-cluster search

      + *

      + * You may want to exclude a cluster or index from a search when: + *

        + *
      • A remote cluster is not currently connected and is configured with + * skip_unavailable=false. Running a cross-cluster search under + * those conditions will cause the entire search to fail.
      • + *
      • A cluster has no matching indices, aliases or data streams for the index + * expression (or your user does not have permissions to search them). For + * example, suppose your index expression is logs*,remote1:logs* + * and the remote1 cluster has no indices, aliases or data streams that match + * logs*. In that case, that cluster will return no results from + * that cluster if you include it in a cross-cluster search.
      • + *
      • The index expression (combined with any query parameters you specify) + * will likely cause an exception to be thrown when you do the search. In these + * cases, the "error" field in the _resolve/cluster + * response will be present. (This is also where security/permission errors will + * be shown.)
      • + *
      • A remote cluster is an older version that does not support the feature + * you want to use in your search.
      • + *
      + *

      Test availability of remote clusters

      + *

      + * The remote/info endpoint is commonly used to test whether the + * "local" cluster (the cluster being queried) is connected to its + * remote clusters, but it does not necessarily reflect whether the remote + * cluster is available or not. The remote cluster may be available, while the + * local cluster is not currently connected to it. + *

      + * You can use the _resolve/cluster API to attempt to reconnect to + * remote clusters. For example with GET _resolve/cluster or + * GET _resolve/cluster/*:*. The connected field in + * the response will indicate whether it was successful. If a connection was + * (re-)established, this will also cause the remote/info endpoint + * to now indicate a connected status. + * + * @see Documentation + * on elastic.co + */ + + public ResolveClusterResponse resolveCluster() throws IOException, ElasticsearchException { + return this.transport.performRequest(new ResolveClusterRequest.Builder().build(), + ResolveClusterRequest._ENDPOINT, this.transportOptions); + } + // ----- Endpoint: indices.resolve_index /** @@ -3245,7 +3599,7 @@ public final ResolveClusterResponse resolveCluster( * supported. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index">Documentation * on elastic.co */ @@ -3265,7 +3619,7 @@ public ResolveIndexResponse resolveIndex(ResolveIndexRequest request) throws IOE * a function that initializes a builder to create the * {@link ResolveIndexRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index">Documentation * on elastic.co */ @@ -3331,7 +3685,7 @@ public final ResolveIndexResponse resolveIndex( * 2099, the new index's name is my-index-2099.05.07-000002. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover">Documentation * on elastic.co */ @@ -3399,7 +3753,7 @@ public RolloverResponse rollover(RolloverRequest request) throws IOException, El * a function that initializes a builder to create the * {@link RolloverRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover">Documentation * on elastic.co */ @@ -3416,7 +3770,7 @@ public final RolloverResponse rollover(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments">Documentation * on elastic.co */ @@ -3436,7 +3790,7 @@ public SegmentsResponse segments(SegmentsRequest request) throws IOException, El * a function that initializes a builder to create the * {@link SegmentsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments">Documentation * on elastic.co */ @@ -3451,7 +3805,7 @@ public final SegmentsResponse segments(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments">Documentation * on elastic.co */ @@ -3480,7 +3834,7 @@ public SegmentsResponse segments() throws IOException, ElasticsearchException { * are unassigned or have one or more unassigned replica shards. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores">Documentation * on elastic.co */ @@ -3512,7 +3866,7 @@ public ShardStoresResponse shardStores(ShardStoresRequest request) throws IOExce * a function that initializes a builder to create the * {@link ShardStoresRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores">Documentation * on elastic.co */ @@ -3540,7 +3894,7 @@ public final ShardStoresResponse shardStores( * are unassigned or have one or more unassigned replica shards. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores">Documentation * on elastic.co */ @@ -3609,7 +3963,7 @@ public ShardStoresResponse shardStores() throws IOException, ElasticsearchExcept *

    * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink">Documentation * on elastic.co */ @@ -3681,7 +4035,7 @@ public ShrinkResponse shrink(ShrinkRequest request) throws IOException, Elastics * a function that initializes a builder to create the * {@link ShrinkRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink">Documentation * on elastic.co */ @@ -3697,7 +4051,7 @@ public final ShrinkResponse shrink(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template">Documentation * on elastic.co */ @@ -3717,7 +4071,7 @@ public SimulateIndexTemplateResponse simulateIndexTemplate(SimulateIndexTemplate * a function that initializes a builder to create the * {@link SimulateIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template">Documentation * on elastic.co */ @@ -3734,7 +4088,7 @@ public final SimulateIndexTemplateResponse simulateIndexTemplate( * by a particular index template. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template">Documentation * on elastic.co */ @@ -3754,7 +4108,7 @@ public SimulateTemplateResponse simulateTemplate(SimulateTemplateRequest request * a function that initializes a builder to create the * {@link SimulateTemplateRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template">Documentation * on elastic.co */ @@ -3769,7 +4123,7 @@ public final SimulateTemplateResponse simulateTemplate( * by a particular index template. 
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template">Documentation * on elastic.co */ @@ -3846,7 +4200,7 @@ public SimulateTemplateResponse simulateTemplate() throws IOException, Elasticse *
* * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split">Documentation * on elastic.co */ @@ -3926,7 +4280,7 @@ public SplitResponse split(SplitRequest request) throws IOException, Elasticsear * a function that initializes a builder to create the * {@link SplitRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split">Documentation * on elastic.co */ @@ -3955,7 +4309,7 @@ public final SplitResponse split(FunctionDocumentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats">Documentation * on elastic.co */ @@ -3987,7 +4341,7 @@ public IndicesStatsResponse stats(IndicesStatsRequest request) throws IOExceptio * a function that initializes a builder to create the * {@link IndicesStatsRequest} * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats">Documentation * on elastic.co */ @@ -4015,7 +4369,7 @@ public final IndicesStatsResponse stats( * any node-level statistics to which the shard contributed. * * @see Documentation + * "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats">Documentation * on elastic.co */ @@ -4024,41 +4378,6 @@ public IndicesStatsResponse stats() throws IOException, ElasticsearchException { this.transportOptions); } - // ----- Endpoint: indices.unfreeze - - /** - * Unfreeze an index. When a frozen index is unfrozen, the index goes through - * the normal recovery process and becomes writeable again. - * - * @see Documentation - * on elastic.co - */ - - public UnfreezeResponse unfreeze(UnfreezeRequest request) throws IOException, ElasticsearchException { - @SuppressWarnings("unchecked") - JsonEndpoint endpoint = (JsonEndpoint) UnfreezeRequest._ENDPOINT; - - return this.transport.performRequest(request, endpoint, this.transportOptions); - } - - /** - * Unfreeze an index. 
When a frozen index is unfrozen, the index goes through - * the normal recovery process and becomes writeable again. - * - * @param fn - * a function that initializes a builder to create the - * {@link UnfreezeRequest} - * @see Documentation - * on elastic.co - */ - - public final UnfreezeResponse unfreeze(Function> fn) - throws IOException, ElasticsearchException { - return unfreeze(fn.apply(new UnfreezeRequest.Builder()).build()); - } - // ----- Endpoint: indices.update_aliases /** diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsAliasRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsAliasRequest.java index afa873138..789464b0d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsAliasRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsAliasRequest.java @@ -62,7 +62,9 @@ // typedef: indices.exists_alias.Request /** - * Check aliases. Checks if one or more data stream or index aliases exist. + * Check aliases. + *

+ * Check if one or more data stream or index aliases exist. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsIndexTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsIndexTemplateRequest.java index 41bc9454b..72649f910 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsIndexTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsIndexTemplateRequest.java @@ -58,7 +58,9 @@ // typedef: indices.exists_index_template.Request /** - * Check index templates. Check whether index templates exist. + * Check index templates. + *

+ * Check whether index templates exist. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleRequest.java index 7ae7c3df5..5760e21a6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleRequest.java @@ -60,8 +60,9 @@ // typedef: indices.get_data_lifecycle.Request /** - * Get data stream lifecycles. Retrieves the data stream lifecycle configuration - * of one or more data streams. + * Get data stream lifecycles. + *

+ * Get the data stream lifecycle configuration of one or more data streams. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamRequest.java index 18fb316d6..52d0315e6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamRequest.java @@ -60,7 +60,9 @@ // typedef: indices.get_data_stream.Request /** - * Get data streams. Retrieves information about one or more data streams. + * Get data streams. + *

+ * Get information about one or more data streams. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMigrateReindexStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMigrateReindexStatusRequest.java new file mode 100644 index 000000000..7fd5291b2 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMigrateReindexStatusRequest.java @@ -0,0 +1,203 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices.get_migrate_reindex_status.Request + +/** + * Get the migration reindexing status. + *

+ * Get the status of a migration reindex attempt for a data stream or index. + * + * @see API + * specification + */ + +public class GetMigrateReindexStatusRequest extends RequestBase { + private final List index; + + // --------------------------------------------------------------------------------------------- + + private GetMigrateReindexStatusRequest(Builder builder) { + + this.index = ApiTypeHelper.unmodifiableRequired(builder.index, this, "index"); + + } + + public static GetMigrateReindexStatusRequest of( + Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The index or data stream name. + *

+ * API name: {@code index} + */ + public final List index() { + return this.index; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link GetMigrateReindexStatusRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private List index; + + /** + * Required - The index or data stream name. + *

+ * API name: {@code index} + *

+ * Adds all elements of list to index. + */ + public final Builder index(List list) { + this.index = _listAddAll(this.index, list); + return this; + } + + /** + * Required - The index or data stream name. + *

+ * API name: {@code index} + *

+ * Adds one or more values to index. + */ + public final Builder index(String value, String... values) { + this.index = _listAdd(this.index, value, values); + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link GetMigrateReindexStatusRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public GetMigrateReindexStatusRequest build() { + _checkSingleUse(); + + return new GetMigrateReindexStatusRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code indices.get_migrate_reindex_status}". + */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/indices.get_migrate_reindex_status", + + // Request method + request -> { + return "GET"; + + }, + + // Request path + request -> { + final int _index = 1 << 0; + + int propsSet = 0; + + propsSet |= _index; + + if (propsSet == (_index)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_migration"); + buf.append("/reindex"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.index.stream().map(v -> v).collect(Collectors.joining(",")), buf); + buf.append("/_status"); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _index = 1 << 0; + + int propsSet = 0; + + propsSet |= _index; + + if (propsSet == (_index)) { + params.put("index", request.index.stream().map(v -> v).collect(Collectors.joining(","))); + } + return params; + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), false, GetMigrateReindexStatusResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMigrateReindexStatusResponse.java 
b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMigrateReindexStatusResponse.java new file mode 100644 index 000000000..02bd05746 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMigrateReindexStatusResponse.java @@ -0,0 +1,451 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch.indices.get_migrate_reindex_status.StatusError; +import co.elastic.clients.elasticsearch.indices.get_migrate_reindex_status.StatusInProgress; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.DateTime; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.lang.Integer; +import java.lang.Long; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: indices.get_migrate_reindex_status.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class GetMigrateReindexStatusResponse implements JsonpSerializable { + @Nullable + private final DateTime startTime; + + private final long startTimeMillis; + + private final boolean complete; + + private final int totalIndicesInDataStream; + + private final int totalIndicesRequiringUpgrade; + + private final int successes; + + private final List inProgress; + + private final int pending; + + private final List errors; + + @Nullable + private final String exception; + + // --------------------------------------------------------------------------------------------- + + private GetMigrateReindexStatusResponse(Builder builder) { + + this.startTime = builder.startTime; + this.startTimeMillis = ApiTypeHelper.requireNonNull(builder.startTimeMillis, this, "startTimeMillis"); + this.complete = ApiTypeHelper.requireNonNull(builder.complete, this, "complete"); + this.totalIndicesInDataStream = ApiTypeHelper.requireNonNull(builder.totalIndicesInDataStream, this, + "totalIndicesInDataStream"); + this.totalIndicesRequiringUpgrade = ApiTypeHelper.requireNonNull(builder.totalIndicesRequiringUpgrade, this, + "totalIndicesRequiringUpgrade"); + this.successes = ApiTypeHelper.requireNonNull(builder.successes, this, "successes"); + this.inProgress = ApiTypeHelper.unmodifiableRequired(builder.inProgress, this, "inProgress"); + this.pending = ApiTypeHelper.requireNonNull(builder.pending, this, "pending"); + this.errors = ApiTypeHelper.unmodifiableRequired(builder.errors, this, "errors"); + this.exception = builder.exception; + + } + + public static GetMigrateReindexStatusResponse of( + Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * API name: {@code start_time} + */ + @Nullable + public final DateTime startTime() { + return this.startTime; + } + + /** + * 
Required - API name: {@code start_time_millis} + */ + public final long startTimeMillis() { + return this.startTimeMillis; + } + + /** + * Required - API name: {@code complete} + */ + public final boolean complete() { + return this.complete; + } + + /** + * Required - API name: {@code total_indices_in_data_stream} + */ + public final int totalIndicesInDataStream() { + return this.totalIndicesInDataStream; + } + + /** + * Required - API name: {@code total_indices_requiring_upgrade} + */ + public final int totalIndicesRequiringUpgrade() { + return this.totalIndicesRequiringUpgrade; + } + + /** + * Required - API name: {@code successes} + */ + public final int successes() { + return this.successes; + } + + /** + * Required - API name: {@code in_progress} + */ + public final List inProgress() { + return this.inProgress; + } + + /** + * Required - API name: {@code pending} + */ + public final int pending() { + return this.pending; + } + + /** + * Required - API name: {@code errors} + */ + public final List errors() { + return this.errors; + } + + /** + * API name: {@code exception} + */ + @Nullable + public final String exception() { + return this.exception; + } + + /** + * Serialize this object to JSON. 
+ */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.startTime != null) { + generator.writeKey("start_time"); + this.startTime.serialize(generator, mapper); + } + generator.writeKey("start_time_millis"); + generator.write(this.startTimeMillis); + + generator.writeKey("complete"); + generator.write(this.complete); + + generator.writeKey("total_indices_in_data_stream"); + generator.write(this.totalIndicesInDataStream); + + generator.writeKey("total_indices_requiring_upgrade"); + generator.write(this.totalIndicesRequiringUpgrade); + + generator.writeKey("successes"); + generator.write(this.successes); + + if (ApiTypeHelper.isDefined(this.inProgress)) { + generator.writeKey("in_progress"); + generator.writeStartArray(); + for (StatusInProgress item0 : this.inProgress) { + item0.serialize(generator, mapper); + + } + generator.writeEnd(); + + } + generator.writeKey("pending"); + generator.write(this.pending); + + if (ApiTypeHelper.isDefined(this.errors)) { + generator.writeKey("errors"); + generator.writeStartArray(); + for (StatusError item0 : this.errors) { + item0.serialize(generator, mapper); + + } + generator.writeEnd(); + + } + if (this.exception != null) { + generator.writeKey("exception"); + generator.write(this.exception); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link GetMigrateReindexStatusResponse}. 
+ */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + @Nullable + private DateTime startTime; + + private Long startTimeMillis; + + private Boolean complete; + + private Integer totalIndicesInDataStream; + + private Integer totalIndicesRequiringUpgrade; + + private Integer successes; + + private List inProgress; + + private Integer pending; + + private List errors; + + @Nullable + private String exception; + + /** + * API name: {@code start_time} + */ + public final Builder startTime(@Nullable DateTime value) { + this.startTime = value; + return this; + } + + /** + * Required - API name: {@code start_time_millis} + */ + public final Builder startTimeMillis(long value) { + this.startTimeMillis = value; + return this; + } + + /** + * Required - API name: {@code complete} + */ + public final Builder complete(boolean value) { + this.complete = value; + return this; + } + + /** + * Required - API name: {@code total_indices_in_data_stream} + */ + public final Builder totalIndicesInDataStream(int value) { + this.totalIndicesInDataStream = value; + return this; + } + + /** + * Required - API name: {@code total_indices_requiring_upgrade} + */ + public final Builder totalIndicesRequiringUpgrade(int value) { + this.totalIndicesRequiringUpgrade = value; + return this; + } + + /** + * Required - API name: {@code successes} + */ + public final Builder successes(int value) { + this.successes = value; + return this; + } + + /** + * Required - API name: {@code in_progress} + *

+ * Adds all elements of list to inProgress. + */ + public final Builder inProgress(List list) { + this.inProgress = _listAddAll(this.inProgress, list); + return this; + } + + /** + * Required - API name: {@code in_progress} + *

+ * Adds one or more values to inProgress. + */ + public final Builder inProgress(StatusInProgress value, StatusInProgress... values) { + this.inProgress = _listAdd(this.inProgress, value, values); + return this; + } + + /** + * Required - API name: {@code in_progress} + *

+ * Adds a value to inProgress using a builder lambda. + */ + public final Builder inProgress(Function> fn) { + return inProgress(fn.apply(new StatusInProgress.Builder()).build()); + } + + /** + * Required - API name: {@code pending} + */ + public final Builder pending(int value) { + this.pending = value; + return this; + } + + /** + * Required - API name: {@code errors} + *

+ * Adds all elements of list to errors. + */ + public final Builder errors(List list) { + this.errors = _listAddAll(this.errors, list); + return this; + } + + /** + * Required - API name: {@code errors} + *

+ * Adds one or more values to errors. + */ + public final Builder errors(StatusError value, StatusError... values) { + this.errors = _listAdd(this.errors, value, values); + return this; + } + + /** + * Required - API name: {@code errors} + *

+ * Adds a value to errors using a builder lambda. + */ + public final Builder errors(Function> fn) { + return errors(fn.apply(new StatusError.Builder()).build()); + } + + /** + * API name: {@code exception} + */ + public final Builder exception(@Nullable String value) { + this.exception = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link GetMigrateReindexStatusResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public GetMigrateReindexStatusResponse build() { + _checkSingleUse(); + + return new GetMigrateReindexStatusResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link GetMigrateReindexStatusResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, GetMigrateReindexStatusResponse::setupGetMigrateReindexStatusResponseDeserializer); + + protected static void setupGetMigrateReindexStatusResponseDeserializer( + ObjectDeserializer op) { + + op.add(Builder::startTime, DateTime._DESERIALIZER, "start_time"); + op.add(Builder::startTimeMillis, JsonpDeserializer.longDeserializer(), "start_time_millis"); + op.add(Builder::complete, JsonpDeserializer.booleanDeserializer(), "complete"); + op.add(Builder::totalIndicesInDataStream, JsonpDeserializer.integerDeserializer(), + "total_indices_in_data_stream"); + op.add(Builder::totalIndicesRequiringUpgrade, JsonpDeserializer.integerDeserializer(), + "total_indices_requiring_upgrade"); + op.add(Builder::successes, JsonpDeserializer.integerDeserializer(), "successes"); + op.add(Builder::inProgress, JsonpDeserializer.arrayDeserializer(StatusInProgress._DESERIALIZER), "in_progress"); + op.add(Builder::pending, JsonpDeserializer.integerDeserializer(), "pending"); + op.add(Builder::errors, JsonpDeserializer.arrayDeserializer(StatusError._DESERIALIZER), 
"errors"); + op.add(Builder::exception, JsonpDeserializer.stringDeserializer(), "exception"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexSettings.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexSettings.java index 8cffd5c26..1cdba3ffa 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexSettings.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexSettings.java @@ -64,7 +64,7 @@ /** * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-modules-settings">Documentation * on elastic.co * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexSettingsLifecycle.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexSettingsLifecycle.java index 045ac9807..6ea6e1cfe 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexSettingsLifecycle.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexSettingsLifecycle.java @@ -79,6 +79,9 @@ public class IndexSettingsLifecycle implements JsonpSerializable { @Nullable private final String rolloverAlias; + @Nullable + private final Boolean preferIlm; + // --------------------------------------------------------------------------------------------- private IndexSettingsLifecycle(Builder builder) { @@ -89,6 +92,7 @@ private IndexSettingsLifecycle(Builder builder) { this.parseOriginationDate = builder.parseOriginationDate; this.step = builder.step; this.rolloverAlias = builder.rolloverAlias; + this.preferIlm = builder.preferIlm; } @@ -169,6 +173,17 @@ public final String rolloverAlias() { return this.rolloverAlias; } + /** + * Preference for the system that manages a data stream backing index + * (preferring ILM when both ILM and DLM are applicable for an index). + *

+ * API name: {@code prefer_ilm} + */ + @Nullable + public final Boolean preferIlm() { + return this.preferIlm; + } + /** * Serialize this object to JSON. */ @@ -210,6 +225,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write(this.rolloverAlias); } + if (this.preferIlm != null) { + generator.writeKey("prefer_ilm"); + generator.write(this.preferIlm); + + } } @@ -245,6 +265,9 @@ public static class Builder extends WithJsonObjectBuilderBase @Nullable private String rolloverAlias; + @Nullable + private Boolean preferIlm; + /** * The name of the policy to use to manage the index. For information about how * Elasticsearch applies policy changes, see Policy updates. @@ -326,6 +349,17 @@ public final Builder rolloverAlias(@Nullable String value) { return this; } + /** + * Preference for the system that manages a data stream backing index + * (preferring ILM when both ILM and DLM are applicable for an index). + *

+ * API name: {@code prefer_ilm} + */ + public final Builder preferIlm(@Nullable Boolean value) { + this.preferIlm = value; + return this; + } + @Override protected Builder self() { return this; @@ -361,6 +395,7 @@ protected static void setupIndexSettingsLifecycleDeserializer( op.add(Builder::parseOriginationDate, JsonpDeserializer.booleanDeserializer(), "parse_origination_date"); op.add(Builder::step, IndexSettingsLifecycleStep._DESERIALIZER, "step"); op.add(Builder::rolloverAlias, JsonpDeserializer.stringDeserializer(), "rollover_alias"); + op.add(Builder::preferIlm, JsonpDeserializer.booleanDeserializer(), "prefer_ilm"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MappingLimitSettings.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MappingLimitSettings.java index 74deaf2a1..5a5730033 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MappingLimitSettings.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MappingLimitSettings.java @@ -55,7 +55,7 @@ * Mapping Limit Settings * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-settings-limit.html">Documentation * on elastic.co * @see API @@ -84,6 +84,9 @@ public class MappingLimitSettings implements JsonpSerializable { @Nullable private final MappingLimitSettingsDimensionFields dimensionFields; + @Nullable + private final MappingLimitSettingsSourceFields source; + @Nullable private final Boolean ignoreMalformed; @@ -98,6 +101,7 @@ private MappingLimitSettings(Builder builder) { this.nestedObjects = builder.nestedObjects; this.fieldNameLength = builder.fieldNameLength; this.dimensionFields = builder.dimensionFields; + this.source = builder.source; this.ignoreMalformed = builder.ignoreMalformed; } @@ -162,6 +166,14 @@ public final MappingLimitSettingsDimensionFields dimensionFields() { return this.dimensionFields; } + /** + * API name: {@code source} + 
*/ + @Nullable + public final MappingLimitSettingsSourceFields source() { + return this.source; + } + /** * API name: {@code ignore_malformed} */ @@ -215,6 +227,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("dimension_fields"); this.dimensionFields.serialize(generator, mapper); + } + if (this.source != null) { + generator.writeKey("source"); + this.source.serialize(generator, mapper); + } if (this.ignoreMalformed != null) { generator.writeKey("ignore_malformed"); @@ -259,6 +276,9 @@ public static class Builder extends WithJsonObjectBuilderBase @Nullable private MappingLimitSettingsDimensionFields dimensionFields; + @Nullable + private MappingLimitSettingsSourceFields source; + @Nullable private Boolean ignoreMalformed; @@ -366,6 +386,22 @@ public final Builder dimensionFields( return this.dimensionFields(fn.apply(new MappingLimitSettingsDimensionFields.Builder()).build()); } + /** + * API name: {@code source} + */ + public final Builder source(@Nullable MappingLimitSettingsSourceFields value) { + this.source = value; + return this; + } + + /** + * API name: {@code source} + */ + public final Builder source( + Function> fn) { + return this.source(fn.apply(new MappingLimitSettingsSourceFields.Builder()).build()); + } + /** * API name: {@code ignore_malformed} */ @@ -409,6 +445,7 @@ protected static void setupMappingLimitSettingsDeserializer(ObjectDeserializerAPI + * specification + */ +@JsonpDeserializable +public class MappingLimitSettingsSourceFields implements JsonpSerializable { + private final SourceMode mode; + + // --------------------------------------------------------------------------------------------- + + private MappingLimitSettingsSourceFields(Builder builder) { + + this.mode = ApiTypeHelper.requireNonNull(builder.mode, this, "mode"); + + } + + public static MappingLimitSettingsSourceFields of( + Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: 
{@code mode} + */ + public final SourceMode mode() { + return this.mode; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("mode"); + this.mode.serialize(generator, mapper); + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link MappingLimitSettingsSourceFields}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private SourceMode mode; + + /** + * Required - API name: {@code mode} + */ + public final Builder mode(SourceMode value) { + this.mode = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link MappingLimitSettingsSourceFields}. + * + * @throws NullPointerException + * if some of the required fields are null. 
+ */ + public MappingLimitSettingsSourceFields build() { + _checkSingleUse(); + + return new MappingLimitSettingsSourceFields(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link MappingLimitSettingsSourceFields} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, MappingLimitSettingsSourceFields::setupMappingLimitSettingsSourceFieldsDeserializer); + + protected static void setupMappingLimitSettingsSourceFieldsDeserializer( + ObjectDeserializer op) { + + op.add(Builder::mode, SourceMode._DESERIALIZER, "mode"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MigrateReindexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MigrateReindexRequest.java new file mode 100644 index 000000000..60558aabe --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MigrateReindexRequest.java @@ -0,0 +1,191 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch.indices.migrate_reindex.MigrateReindex; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import jakarta.json.stream.JsonParser; +import java.util.Collections; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices.migrate_reindex.Request + +/** + * Reindex legacy backing indices. + *

+ * Reindex all legacy backing indices for a data stream. This operation occurs + * in a persistent task. The persistent task ID is returned immediately and the + * reindexing work is completed in that task. + * + * @see API + * specification + */ +@JsonpDeserializable +public class MigrateReindexRequest extends RequestBase implements JsonpSerializable { + private final MigrateReindex reindex; + + // --------------------------------------------------------------------------------------------- + + private MigrateReindexRequest(Builder builder) { + + this.reindex = ApiTypeHelper.requireNonNull(builder.reindex, this, "reindex"); + + } + + public static MigrateReindexRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - Request body. + */ + public final MigrateReindex reindex() { + return this.reindex; + } + + /** + * Serialize this value to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + this.reindex.serialize(generator, mapper); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link MigrateReindexRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private MigrateReindex reindex; + + /** + * Required - Request body. + */ + public final Builder reindex(MigrateReindex value) { + this.reindex = value; + return this; + } + + /** + * Required - Request body. + */ + public final Builder reindex(Function> fn) { + return this.reindex(fn.apply(new MigrateReindex.Builder()).build()); + } + + @Override + public Builder withJson(JsonParser parser, JsonpMapper mapper) { + + @SuppressWarnings("unchecked") + MigrateReindex value = (MigrateReindex) MigrateReindex._DESERIALIZER.deserialize(parser, mapper); + return this.reindex(value); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link MigrateReindexRequest}. 
+ * + * @throws NullPointerException + * if some of the required fields are null. + */ + public MigrateReindexRequest build() { + _checkSingleUse(); + + return new MigrateReindexRequest(this); + } + } + + public static final JsonpDeserializer _DESERIALIZER = createMigrateReindexRequestDeserializer(); + protected static JsonpDeserializer createMigrateReindexRequestDeserializer() { + + JsonpDeserializer valueDeserializer = MigrateReindex._DESERIALIZER; + + return JsonpDeserializer.of(valueDeserializer.acceptedEvents(), (parser, mapper, event) -> new Builder() + .reindex(valueDeserializer.deserialize(parser, mapper, event)).build()); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code indices.migrate_reindex}". + */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/indices.migrate_reindex", + + // Request method + request -> { + return "POST"; + + }, + + // Request path + request -> { + return "/_migration/reindex"; + + }, + + // Path parameters + request -> { + return Collections.emptyMap(); + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), true, MigrateReindexResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MigrateReindexResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MigrateReindexResponse.java new file mode 100644 index 000000000..c4a5de26c --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/MigrateReindexResponse.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.AcknowledgedResponseBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.util.Objects; +import java.util.function.Function; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: indices.migrate_reindex.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class MigrateReindexResponse extends AcknowledgedResponseBase { + // --------------------------------------------------------------------------------------------- + + private MigrateReindexResponse(Builder builder) { + super(builder); + + } + + public static MigrateReindexResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link MigrateReindexResponse}. + */ + + public static class Builder extends AcknowledgedResponseBase.AbstractBuilder + implements + ObjectBuilder { + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link MigrateReindexResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. 
+ */ + public MigrateReindexResponse build() { + _checkSingleUse(); + + return new MigrateReindexResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link MigrateReindexResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, MigrateReindexResponse::setupMigrateReindexResponseDeserializer); + + protected static void setupMigrateReindexResponseDeserializer( + ObjectDeserializer op) { + AcknowledgedResponseBase.setupAcknowledgedResponseBaseDeserializer(op); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ResolveClusterRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ResolveClusterRequest.java index a499bf4bf..f1a365d63 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ResolveClusterRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ResolveClusterRequest.java @@ -22,6 +22,7 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.ExpandWildcard; import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -59,9 +60,12 @@ // typedef: indices.resolve_cluster.Request /** - * Resolve the cluster. Resolve the specified index expressions to return - * information about each cluster, including the local cluster, if included. - * Multiple patterns and remote clusters are supported. + * Resolve the cluster. + *

+ * Resolve the specified index expressions to return information about each + * cluster, including the local "querying" cluster, if included. If no + * index expression is provided, the API will return information about all the + * remote clusters that are configured on the querying cluster. *

* This endpoint is useful before doing a cross-cluster search in order to * determine which remote clusters should be included in a search. @@ -73,7 +77,9 @@ * For each cluster in the index expression, information is returned about: *

- *

- * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. - * + * * @see Documentation * on elastic.co @@ -1133,17 +989,7 @@ public CompletableFuture putHuggingFace(PutHuggingFaceRe *

  • multilingual-e5-base
  • *
  • multilingual-e5-small
  • * - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. - * + * * @param fn * a function that initializes a builder to create the * {@link PutHuggingFaceRequest} @@ -1169,16 +1015,6 @@ public final CompletableFuture putHuggingFace( * https://jina.ai/reranker. To review * the available text_embedding models, refer to the * https://jina.ai/embeddings/. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -1202,16 +1038,6 @@ public CompletableFuture putJinaai(PutJinaaiRequest request) * https://jina.ai/reranker. To review * the available text_embedding models, refer to the * https://jina.ai/embeddings/. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -1233,16 +1059,6 @@ public final CompletableFuture putJinaai( *

    * Creates an inference endpoint to perform an inference task with the * mistral service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -1261,16 +1077,6 @@ public CompletableFuture putMistral(PutMistralRequest reques *

    * Creates an inference endpoint to perform an inference task with the * mistral service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -1292,16 +1098,6 @@ public final CompletableFuture putMistral( *

    * Create an inference endpoint to perform an inference task with the * openai service or openai compatible APIs. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -1320,16 +1116,6 @@ public CompletableFuture putOpenai(PutOpenaiRequest request) *

    * Create an inference endpoint to perform an inference task with the * openai service or openai compatible APIs. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -1399,16 +1185,6 @@ public final CompletableFuture putVoyageai( * Elasticsearch deployment to use the watsonxai inference service. * You can provision one through the IBM catalog, the Cloud Databases CLI * plug-in, the Cloud Databases API, or Terraform. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -1430,16 +1206,6 @@ public CompletableFuture putWatsonx(PutWatsonxRequest reques * Elasticsearch deployment to use the watsonxai inference service. * You can provision one through the IBM catalog, the Cloud Databases CLI * plug-in, the Cloud Databases API, or Terraform. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java index de476c06e..a97f61918 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java @@ -70,6 +70,28 @@ public ElasticsearchInferenceClient withTransportOptions(@Nullable TransportOpti /** * Perform chat completion inference + *

    + * The chat completion inference API enables real-time responses for chat + * completion tasks by delivering answers incrementally, reducing response times + * during computation. It only works with the chat_completion task + * type for openai and elastic inference services. + *

    + * IMPORTANT: The inference APIs enable you to use certain services, such as + * built-in machine learning models (ELSER, E5), models uploaded through Eland, + * Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, + * Watsonx.ai, or Hugging Face. For built-in models and models uploaded through + * Eland, the inference APIs offer an alternative way to use and manage trained + * models. However, if you do not plan to use the inference APIs to use these + * models or if you want to use non-NLP models, use the machine learning trained + * model APIs. + *

    + * NOTE: The chat_completion task type is only available within the + * _stream API and only supports streaming. The Chat completion inference API + * and the Stream inference API differ in their response structure and + * capabilities. The Chat completion inference API provides more comprehensive + * customization options through more fields and function calling support. If + * you use the openai service or the elastic service, + * use the Chat completion inference API. * * @see Documentation @@ -86,6 +108,28 @@ public BinaryResponse chatCompletionUnified(ChatCompletionUnifiedRequest request /** * Perform chat completion inference + *

    + * The chat completion inference API enables real-time responses for chat + * completion tasks by delivering answers incrementally, reducing response times + * during computation. It only works with the chat_completion task + * type for openai and elastic inference services. + *

    + * IMPORTANT: The inference APIs enable you to use certain services, such as + * built-in machine learning models (ELSER, E5), models uploaded through Eland, + * Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, + * Watsonx.ai, or Hugging Face. For built-in models and models uploaded through + * Eland, the inference APIs offer an alternative way to use and manage trained + * models. However, if you do not plan to use the inference APIs to use these + * models or if you want to use non-NLP models, use the machine learning trained + * model APIs. + *

    + * NOTE: The chat_completion task type is only available within the + * _stream API and only supports streaming. The Chat completion inference API + * and the Stream inference API differ in their response structure and + * capabilities. The Chat completion inference API provides more comprehensive + * customization options through more fields and function calling support. If + * you use the openai service or the elastic service, + * use the Chat completion inference API. * * @param fn * a function that initializes a builder to create the @@ -292,16 +336,7 @@ public final InferenceResponse inference(Function"state": "fully_allocated" in the response - * and ensure that the "allocation_count" matches the - * "target_allocation_count". Avoid creating multiple - * endpoints for the same model unless required, as each endpoint consumes - * significant resources. + * Create an inference endpoint. *

    * IMPORTANT: The inference APIs enable you to use certain services, such as * built-in machine learning models (ELSER, E5), models uploaded through Eland, @@ -325,16 +360,7 @@ public PutResponse put(PutRequest request) throws IOException, ElasticsearchExce } /** - * Create an inference endpoint. When you create an inference endpoint, the - * associated machine learning model is automatically deployed if it is not - * already running. After creating the endpoint, wait for the model deployment - * to complete before using it. To verify the deployment status, use the get - * trained model statistics API. Look for - * "state": "fully_allocated" in the response - * and ensure that the "allocation_count" matches the - * "target_allocation_count". Avoid creating multiple - * endpoints for the same model unless required, as each endpoint consumes - * significant resources. + * Create an inference endpoint. *

    * IMPORTANT: The inference APIs enable you to use certain services, such as * built-in machine learning models (ELSER, E5), models uploaded through Eland, @@ -365,16 +391,6 @@ public final PutResponse put(Function * Create an inference endpoint to perform an inference task with the * alibabacloud-ai-search service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -394,16 +410,6 @@ public PutAlibabacloudResponse putAlibabacloud(PutAlibabacloudRequest request) *

    * Create an inference endpoint to perform an inference task with the * alibabacloud-ai-search service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -435,17 +441,7 @@ public final PutAlibabacloudResponse putAlibabacloud( * updated keys. *

    * - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. - * + * * @see Documentation * on elastic.co @@ -473,17 +469,7 @@ public PutAmazonbedrockResponse putAmazonbedrock(PutAmazonbedrockRequest request * updated keys. *

    * - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. - * + * * @param fn * a function that initializes a builder to create the * {@link PutAmazonbedrockRequest} @@ -505,16 +491,6 @@ public final PutAmazonbedrockResponse putAmazonbedrock( *

    * Create an inference endpoint to perform an inference task with the * anthropic service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -533,16 +509,6 @@ public PutAnthropicResponse putAnthropic(PutAnthropicRequest request) throws IOE *

    * Create an inference endpoint to perform an inference task with the * anthropic service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -565,16 +531,6 @@ public final PutAnthropicResponse putAnthropic( *

    * Create an inference endpoint to perform an inference task with the * azureaistudio service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -594,16 +550,6 @@ public PutAzureaistudioResponse putAzureaistudio(PutAzureaistudioRequest request *

    * Create an inference endpoint to perform an inference task with the * azureaistudio service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -641,16 +587,6 @@ public final PutAzureaistudioResponse putAzureaistudio( * be found in the Azure * models documentation. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -685,16 +621,6 @@ public PutAzureopenaiResponse putAzureopenai(PutAzureopenaiRequest request) * be found in the Azure * models documentation. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -717,16 +643,6 @@ public final PutAzureopenaiResponse putAzureopenai( *

    * Create an inference endpoint to perform an inference task with the * cohere service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -745,16 +661,6 @@ public PutCohereResponse putCohere(PutCohereRequest request) throws IOException, *

    * Create an inference endpoint to perform an inference task with the * cohere service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -964,16 +870,6 @@ public final PutElserResponse putElser(Function * Create an inference endpoint to perform an inference task with the * googleaistudio service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -993,16 +889,6 @@ public PutGoogleaistudioResponse putGoogleaistudio(PutGoogleaistudioRequest requ *

    * Create an inference endpoint to perform an inference task with the * googleaistudio service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -1025,16 +911,6 @@ public final PutGoogleaistudioResponse putGoogleaistudio( *

    * Create an inference endpoint to perform an inference task with the * googlevertexai service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -1054,16 +930,6 @@ public PutGooglevertexaiResponse putGooglevertexai(PutGooglevertexaiRequest requ *

    * Create an inference endpoint to perform an inference task with the * googlevertexai service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -1104,17 +970,7 @@ public final PutGooglevertexaiResponse putGooglevertexai( *

  • multilingual-e5-base
  • *
  • multilingual-e5-small
  • * - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. - * + * * @see Documentation * on elastic.co @@ -1151,17 +1007,7 @@ public PutHuggingFaceResponse putHuggingFace(PutHuggingFaceRequest request) *

  • multilingual-e5-base
  • *
  • multilingual-e5-small
  • * - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. - * + * * @param fn * a function that initializes a builder to create the * {@link PutHuggingFaceRequest} @@ -1188,16 +1034,6 @@ public final PutHuggingFaceResponse putHuggingFace( * https://jina.ai/reranker. To review * the available text_embedding models, refer to the * https://jina.ai/embeddings/. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -1221,16 +1057,6 @@ public PutJinaaiResponse putJinaai(PutJinaaiRequest request) throws IOException, * https://jina.ai/reranker. To review * the available text_embedding models, refer to the * https://jina.ai/embeddings/. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -1252,16 +1078,6 @@ public final PutJinaaiResponse putJinaai(Function * Creates an inference endpoint to perform an inference task with the * mistral service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -1280,16 +1096,6 @@ public PutMistralResponse putMistral(PutMistralRequest request) throws IOExcepti *

    * Creates an inference endpoint to perform an inference task with the * mistral service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -1311,16 +1117,6 @@ public final PutMistralResponse putMistral(Function * Create an inference endpoint to perform an inference task with the * openai service or openai compatible APIs. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -1339,16 +1135,6 @@ public PutOpenaiResponse putOpenai(PutOpenaiRequest request) throws IOException, *

    * Create an inference endpoint to perform an inference task with the * openai service or openai compatible APIs. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the @@ -1419,16 +1205,6 @@ public final PutVoyageaiResponse putVoyageai( * Elasticsearch deployment to use the watsonxai inference service. * You can provision one through the IBM catalog, the Cloud Databases CLI * plug-in, the Cloud Databases API, or Terraform. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see Documentation @@ -1450,16 +1226,6 @@ public PutWatsonxResponse putWatsonx(PutWatsonxRequest request) throws IOExcepti * Elasticsearch deployment to use the watsonxai inference service. * You can provision one through the IBM catalog, the Cloud Databases CLI * plug-in, the Cloud Databases API, or Terraform. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoJinaAi.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoJinaAi.java new file mode 100644 index 000000000..da46627ab --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoJinaAi.java @@ -0,0 +1,141 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.inference; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Objects; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: inference._types.InferenceEndpointInfoJinaAi + +/** + * + * @see API + * specification + */ + +public abstract class InferenceEndpointInfoJinaAi extends InferenceEndpoint { + private final String inferenceId; + + private final TaskTypeJinaAi taskType; + + // --------------------------------------------------------------------------------------------- + + protected InferenceEndpointInfoJinaAi(AbstractBuilder builder) { + super(builder); + + this.inferenceId = ApiTypeHelper.requireNonNull(builder.inferenceId, this, "inferenceId"); + this.taskType = ApiTypeHelper.requireNonNull(builder.taskType, this, "taskType"); + + } + + /** + * Required - The inference Id + *

    + * API name: {@code inference_id} + */ + public final String inferenceId() { + return this.inferenceId; + } + + /** + * Required - The task type + *

    + * API name: {@code task_type} + */ + public final TaskTypeJinaAi taskType() { + return this.taskType; + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + super.serializeInternal(generator, mapper); + generator.writeKey("inference_id"); + generator.write(this.inferenceId); + + generator.writeKey("task_type"); + this.taskType.serialize(generator, mapper); + + } + + public abstract static class AbstractBuilder> + extends + InferenceEndpoint.AbstractBuilder { + private String inferenceId; + + private TaskTypeJinaAi taskType; + + /** + * Required - The inference Id + *

    + * API name: {@code inference_id} + */ + public final BuilderT inferenceId(String value) { + this.inferenceId = value; + return self(); + } + + /** + * Required - The task type + *

    + * API name: {@code task_type} + */ + public final BuilderT taskType(TaskTypeJinaAi value) { + this.taskType = value; + return self(); + } + + } + + // --------------------------------------------------------------------------------------------- + protected static > void setupInferenceEndpointInfoJinaAiDeserializer( + ObjectDeserializer op) { + InferenceEndpoint.setupInferenceEndpointDeserializer(op); + op.add(AbstractBuilder::inferenceId, JsonpDeserializer.stringDeserializer(), "inference_id"); + op.add(AbstractBuilder::taskType, TaskTypeJinaAi._DESERIALIZER, "task_type"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAlibabacloudRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAlibabacloudRequest.java index c0954788d..55378b971 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAlibabacloudRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAlibabacloudRequest.java @@ -62,16 +62,6 @@ *

    * Create an inference endpoint to perform an inference task with the * alibabacloud-ai-search service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAmazonbedrockRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAmazonbedrockRequest.java index 55b01e4bf..7afaf21bf 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAmazonbedrockRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAmazonbedrockRequest.java @@ -71,17 +71,7 @@ * updated keys. *

    * - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. - * + * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAnthropicRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAnthropicRequest.java index 2d1507c5e..afb447140 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAnthropicRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAnthropicRequest.java @@ -62,16 +62,6 @@ *

    * Create an inference endpoint to perform an inference task with the * anthropic service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAzureaistudioRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAzureaistudioRequest.java index 3a0d1ef54..87415cc8c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAzureaistudioRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAzureaistudioRequest.java @@ -62,16 +62,6 @@ *

    * Create an inference endpoint to perform an inference task with the * azureaistudio service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAzureopenaiRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAzureopenaiRequest.java index 9b14d99ec..37aa2292c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAzureopenaiRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutAzureopenaiRequest.java @@ -77,16 +77,6 @@ * be found in the Azure * models documentation. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutCohereRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutCohereRequest.java index 26de9cff1..3da2f26f5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutCohereRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutCohereRequest.java @@ -62,16 +62,6 @@ *

    * Create an inference endpoint to perform an inference task with the * cohere service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutGoogleaistudioRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutGoogleaistudioRequest.java index fb2949825..32b9db6cf 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutGoogleaistudioRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutGoogleaistudioRequest.java @@ -62,16 +62,6 @@ *

    * Create an inference endpoint to perform an inference task with the * googleaistudio service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutGooglevertexaiRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutGooglevertexaiRequest.java index c491288d4..5f9082e54 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutGooglevertexaiRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutGooglevertexaiRequest.java @@ -62,16 +62,6 @@ *

    * Create an inference endpoint to perform an inference task with the * googlevertexai service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutHuggingFaceRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutHuggingFaceRequest.java index 3b081c97f..1d494fa22 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutHuggingFaceRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutHuggingFaceRequest.java @@ -80,17 +80,7 @@ *

  • multilingual-e5-base
  • *
  • multilingual-e5-small
  • * - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. - * + * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutJinaaiRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutJinaaiRequest.java index ab9b47ccb..924d75223 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutJinaaiRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutJinaaiRequest.java @@ -67,16 +67,6 @@ * https://jina.ai/reranker. To review * the available text_embedding models, refer to the * https://jina.ai/embeddings/. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutJinaaiResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutJinaaiResponse.java index 6d40a39ab..44aacfdbd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutJinaaiResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutJinaaiResponse.java @@ -51,7 +51,7 @@ * specification */ @JsonpDeserializable -public class PutJinaaiResponse extends InferenceEndpointInfo { +public class PutJinaaiResponse extends InferenceEndpointInfoJinaAi { // --------------------------------------------------------------------------------------------- private PutJinaaiResponse(Builder builder) { @@ -69,7 +69,7 @@ public static PutJinaaiResponse of(Function + public static class Builder extends InferenceEndpointInfoJinaAi.AbstractBuilder implements ObjectBuilder { @Override @@ -99,7 +99,7 @@ public PutJinaaiResponse build() { .lazy(Builder::new, PutJinaaiResponse::setupPutJinaaiResponseDeserializer); protected static void setupPutJinaaiResponseDeserializer(ObjectDeserializer op) { - InferenceEndpointInfo.setupInferenceEndpointInfoDeserializer(op); + InferenceEndpointInfoJinaAi.setupInferenceEndpointInfoJinaAiDeserializer(op); } diff --git 
a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutMistralRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutMistralRequest.java index cde473677..cfc6451ee 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutMistralRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutMistralRequest.java @@ -62,16 +62,6 @@ *

    * Creates an inference endpoint to perform an inference task with the * mistral service. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutOpenaiRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutOpenaiRequest.java index 2b10fe33f..bbc89dd1a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutOpenaiRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutOpenaiRequest.java @@ -62,16 +62,6 @@ *

    * Create an inference endpoint to perform an inference task with the * openai service or openai compatible APIs. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java index 6f734f226..1336a1320 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java @@ -59,16 +59,7 @@ // typedef: inference.put.Request /** - * Create an inference endpoint. When you create an inference endpoint, the - * associated machine learning model is automatically deployed if it is not - * already running. After creating the endpoint, wait for the model deployment - * to complete before using it. To verify the deployment status, use the get - * trained model statistics API. Look for - * "state": "fully_allocated" in the response - * and ensure that the "allocation_count" matches the - * "target_allocation_count". Avoid creating multiple - * endpoints for the same model unless required, as each endpoint consumes - * significant resources. + * Create an inference endpoint. *

    * IMPORTANT: The inference APIs enable you to use certain services, such as * built-in machine learning models (ELSER, E5), models uploaded through Eland, diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutWatsonxRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutWatsonxRequest.java index 2529929f7..dad5c0e86 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutWatsonxRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutWatsonxRequest.java @@ -65,16 +65,6 @@ * Elasticsearch deployment to use the watsonxai inference service. * You can provision one through the IBM catalog, the Cloud Databases CLI * plug-in, the Cloud Databases API, or Terraform. - *

    - * When you create an inference endpoint, the associated machine learning model - * is automatically deployed if it is not already running. After creating the - * endpoint, wait for the model deployment to complete before using it. To - * verify the deployment status, use the get trained model statistics API. Look - * for "state": "fully_allocated" in the - * response and ensure that the "allocation_count" - * matches the "target_allocation_count". Avoid creating - * multiple endpoints for the same model unless required, as each endpoint - * consumes significant resources. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/RequestChatCompletion.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/RequestChatCompletion.java index ed267fc8e..99628e8c0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/RequestChatCompletion.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/RequestChatCompletion.java @@ -104,7 +104,12 @@ public static RequestChatCompletion of(Functionuser). The + * other message roles (assistant, system, or + * tool) should generally only be copied from the response to a + * previous completion request, such that the messages array is built up + * throughout a conversation. *

    * API name: {@code messages} */ @@ -289,7 +294,12 @@ public static class Builder extends WithJsonObjectBuilderBase private Float topP; /** - * Required - A list of objects representing the conversation. + * Required - A list of objects representing the conversation. Requests should + * generally only add new messages from the user (role user). The + * other message roles (assistant, system, or + * tool) should generally only be copied from the response to a + * previous completion request, such that the messages array is built up + * throughout a conversation. *

    * API name: {@code messages} *

    @@ -301,7 +311,12 @@ public final Builder messages(List list) { } /** - * Required - A list of objects representing the conversation. + * Required - A list of objects representing the conversation. Requests should + * generally only add new messages from the user (role user). The + * other message roles (assistant, system, or + * tool) should generally only be copied from the response to a + * previous completion request, such that the messages array is built up + * throughout a conversation. *

    * API name: {@code messages} *

    @@ -313,7 +328,12 @@ public final Builder messages(Message value, Message... values) { } /** - * Required - A list of objects representing the conversation. + * Required - A list of objects representing the conversation. Requests should + * generally only add new messages from the user (role user). The + * other message roles (assistant, system, or + * tool) should generally only be copied from the response to a + * previous completion request, such that the messages array is built up + * throughout a conversation. *

    * API name: {@code messages} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeJinaAi.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeJinaAi.java new file mode 100644 index 000000000..21aa5602f --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeJinaAi.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.inference; + +import co.elastic.clients.json.JsonEnum; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public enum TaskTypeJinaAi implements JsonEnum { + TextEmbedding("text_embedding"), + + Rerank("rerank"), + + ; + + private final String jsonValue; + + TaskTypeJinaAi(String jsonValue) { + this.jsonValue = jsonValue; + } + + public String jsonValue() { + return this.jsonValue; + } + + public static final JsonEnum.Deserializer _DESERIALIZER = new JsonEnum.Deserializer<>( + TaskTypeJinaAi.values()); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeResponse.java index 7a70c41c6..3616861cd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeResponse.java @@ -32,6 +32,7 @@ import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; import java.lang.Boolean; +import java.lang.String; import java.util.List; import java.util.Objects; import java.util.function.Function; @@ -66,12 +67,16 @@ public class PostFeatureUpgradeResponse implements JsonpSerializable { private final List features; + @Nullable + private final String reason; + // --------------------------------------------------------------------------------------------- private PostFeatureUpgradeResponse(Builder builder) { this.accepted = ApiTypeHelper.requireNonNull(builder.accepted, this, "accepted", false); - this.features = ApiTypeHelper.unmodifiableRequired(builder.features, this, "features"); + this.features = ApiTypeHelper.unmodifiable(builder.features); + this.reason = builder.reason; } @@ -87,12 +92,20 @@ public final boolean accepted() { } /** - * Required - API name: {@code features} + * API name: {@code features} */ 
public final List features() { return this.features; } + /** + * API name: {@code reason} + */ + @Nullable + public final String reason() { + return this.reason; + } + /** * Serialize this object to JSON. */ @@ -117,6 +130,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeEnd(); } + if (this.reason != null) { + generator.writeKey("reason"); + generator.write(this.reason); + + } } @@ -136,8 +154,12 @@ public static class Builder extends WithJsonObjectBuilderBase ObjectBuilder { private Boolean accepted; + @Nullable private List features; + @Nullable + private String reason; + /** * Required - API name: {@code accepted} */ @@ -147,7 +169,7 @@ public final Builder accepted(boolean value) { } /** - * Required - API name: {@code features} + * API name: {@code features} *

    * Adds all elements of list to features. */ @@ -157,7 +179,7 @@ public final Builder features(List list) { } /** - * Required - API name: {@code features} + * API name: {@code features} *

    * Adds one or more values to features. */ @@ -167,7 +189,7 @@ public final Builder features(MigrationFeature value, MigrationFeature... values } /** - * Required - API name: {@code features} + * API name: {@code features} *

    * Adds a value to features using a builder lambda. */ @@ -175,6 +197,14 @@ public final Builder features(Function getRepository() { // ----- Endpoint: snapshot.repository_analyze /** - * Analyze a snapshot repository. Analyze the performance characteristics and - * any incorrect behaviour found in a repository. + * Analyze a snapshot repository. *

    - * The response exposes implementation details of the analysis which may change - * from version to version. The response body format is therefore not considered - * stable and may be different in newer versions. + * Performs operations on a snapshot repository in order to check for incorrect + * behaviour. *

    * There are a large number of third-party storage systems available, not all of * which are suitable for use as a snapshot repository by Elasticsearch. Some @@ -413,24 +411,45 @@ public CompletableFuture getRepository() { * The default values for the parameters are deliberately low to reduce the * impact of running an analysis inadvertently and to provide a sensible * starting point for your investigations. Run your first analysis with the - * default parameter values to check for simple problems. If successful, run a - * sequence of increasingly large analyses until you encounter a failure or you - * reach a blob_count of at least 2000, a + * default parameter values to check for simple problems. Some repositories may + * behave correctly when lightly loaded but incorrectly under production-like + * workloads. If the first analysis is successful, run a sequence of + * increasingly large analyses until you encounter a failure or you reach a + * blob_count of at least 2000, a * max_blob_size of at least 2gb, a * max_total_data_size of at least 1tb, and a * register_operation_count of at least 100. Always * specify a generous timeout, possibly 1h or longer, to allow time - * for each analysis to run to completion. Perform the analyses using a - * multi-node cluster of a similar size to your production cluster so that it - * can detect any problems that only arise when the repository is accessed by - * many nodes at once. + * for each analysis to run to completion. Some repositories may behave + * correctly when accessed by a small number of Elasticsearch nodes but + * incorrectly when accessed concurrently by a production-scale cluster. Perform + * the analyses using a multi-node cluster of a similar size to your production + * cluster so that it can detect any problems that only arise when the + * repository is accessed by many nodes at once. *

    * If the analysis fails, Elasticsearch detected that your repository behaved * unexpectedly. This usually means you are using a third-party storage system * with an incorrect or incompatible implementation of the API it claims to * support. If so, this storage system is not suitable for use as a snapshot - * repository. You will need to work with the supplier of your storage system to - * address the incompatibilities that Elasticsearch detects. + * repository. Repository analysis triggers conditions that occur only rarely + * when taking snapshots in a production system. Snapshotting to unsuitable + * storage may appear to work correctly most of the time despite repository + * analysis failures. However your snapshot data is at risk if you store it in a + * snapshot repository that does not reliably pass repository analysis. You can + * demonstrate that the analysis failure is due to an incompatible storage + * implementation by verifying that Elasticsearch does not detect the same + * problem when analysing the reference implementation of the storage protocol + * you are using. For instance, if you are using storage that offers an API + * which the supplier claims to be compatible with AWS S3, verify that + * repositories in AWS S3 do not fail repository analysis. This allows you to + * demonstrate to your storage supplier that a repository analysis failure must + * only be caused by an incompatibility with AWS S3 and cannot be attributed to + * a problem in Elasticsearch. Please do not report Elasticsearch issues + * involving third-party storage systems unless you can demonstrate that the + * same issue exists when analysing a repository that uses the reference + * implementation of the same storage protocol. You will need to work with the + * supplier of your storage system to address the incompatibilities that + * Elasticsearch detects. *

    * If the analysis is successful, the API returns details of the testing * process, optionally including how long each operation took. You can use this @@ -481,6 +500,8 @@ public CompletableFuture getRepository() { *

    * NOTE: This API is intended for exploratory use by humans. You should expect * the request parameters and the response format to vary in future versions. + * The response exposes implementation details of the analysis which may change + * from version to version. *

    * NOTE: Different versions of Elasticsearch may perform different checks for * repository compatibility, with newer versions typically being stricter than @@ -566,12 +587,10 @@ public CompletableFuture repositoryAnalyze(Repository } /** - * Analyze a snapshot repository. Analyze the performance characteristics and - * any incorrect behaviour found in a repository. + * Analyze a snapshot repository. *

    - * The response exposes implementation details of the analysis which may change - * from version to version. The response body format is therefore not considered - * stable and may be different in newer versions. + * Performs operations on a snapshot repository in order to check for incorrect + * behaviour. *

    * There are a large number of third-party storage systems available, not all of * which are suitable for use as a snapshot repository by Elasticsearch. Some @@ -584,24 +603,45 @@ public CompletableFuture repositoryAnalyze(Repository * The default values for the parameters are deliberately low to reduce the * impact of running an analysis inadvertently and to provide a sensible * starting point for your investigations. Run your first analysis with the - * default parameter values to check for simple problems. If successful, run a - * sequence of increasingly large analyses until you encounter a failure or you - * reach a blob_count of at least 2000, a + * default parameter values to check for simple problems. Some repositories may + * behave correctly when lightly loaded but incorrectly under production-like + * workloads. If the first analysis is successful, run a sequence of + * increasingly large analyses until you encounter a failure or you reach a + * blob_count of at least 2000, a * max_blob_size of at least 2gb, a * max_total_data_size of at least 1tb, and a * register_operation_count of at least 100. Always * specify a generous timeout, possibly 1h or longer, to allow time - * for each analysis to run to completion. Perform the analyses using a - * multi-node cluster of a similar size to your production cluster so that it - * can detect any problems that only arise when the repository is accessed by - * many nodes at once. + * for each analysis to run to completion. Some repositories may behave + * correctly when accessed by a small number of Elasticsearch nodes but + * incorrectly when accessed concurrently by a production-scale cluster. Perform + * the analyses using a multi-node cluster of a similar size to your production + * cluster so that it can detect any problems that only arise when the + * repository is accessed by many nodes at once. *

    * If the analysis fails, Elasticsearch detected that your repository behaved * unexpectedly. This usually means you are using a third-party storage system * with an incorrect or incompatible implementation of the API it claims to * support. If so, this storage system is not suitable for use as a snapshot - * repository. You will need to work with the supplier of your storage system to - * address the incompatibilities that Elasticsearch detects. + * repository. Repository analysis triggers conditions that occur only rarely + * when taking snapshots in a production system. Snapshotting to unsuitable + * storage may appear to work correctly most of the time despite repository + * analysis failures. However your snapshot data is at risk if you store it in a + * snapshot repository that does not reliably pass repository analysis. You can + * demonstrate that the analysis failure is due to an incompatible storage + * implementation by verifying that Elasticsearch does not detect the same + * problem when analysing the reference implementation of the storage protocol + * you are using. For instance, if you are using storage that offers an API + * which the supplier claims to be compatible with AWS S3, verify that + * repositories in AWS S3 do not fail repository analysis. This allows you to + * demonstrate to your storage supplier that a repository analysis failure must + * only be caused by an incompatibility with AWS S3 and cannot be attributed to + * a problem in Elasticsearch. Please do not report Elasticsearch issues + * involving third-party storage systems unless you can demonstrate that the + * same issue exists when analysing a repository that uses the reference + * implementation of the same storage protocol. You will need to work with the + * supplier of your storage system to address the incompatibilities that + * Elasticsearch detects. *

    * If the analysis is successful, the API returns details of the testing * process, optionally including how long each operation took. You can use this @@ -652,6 +692,8 @@ public CompletableFuture repositoryAnalyze(Repository *

    * NOTE: This API is intended for exploratory use by humans. You should expect * the request parameters and the response format to vary in future versions. + * The response exposes implementation details of the analysis which may change + * from version to version. *

    * NOTE: Different versions of Elasticsearch may perform different checks for * repository compatibility, with newer versions typically being stricter than diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java index 857c0f924..cf19da71d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java @@ -404,12 +404,10 @@ public GetRepositoryResponse getRepository() throws IOException, ElasticsearchEx // ----- Endpoint: snapshot.repository_analyze /** - * Analyze a snapshot repository. Analyze the performance characteristics and - * any incorrect behaviour found in a repository. + * Analyze a snapshot repository. *

    - * The response exposes implementation details of the analysis which may change - * from version to version. The response body format is therefore not considered - * stable and may be different in newer versions. + * Performs operations on a snapshot repository in order to check for incorrect + * behaviour. *

    * There are a large number of third-party storage systems available, not all of * which are suitable for use as a snapshot repository by Elasticsearch. Some @@ -422,24 +420,45 @@ public GetRepositoryResponse getRepository() throws IOException, ElasticsearchEx * The default values for the parameters are deliberately low to reduce the * impact of running an analysis inadvertently and to provide a sensible * starting point for your investigations. Run your first analysis with the - * default parameter values to check for simple problems. If successful, run a - * sequence of increasingly large analyses until you encounter a failure or you - * reach a blob_count of at least 2000, a + * default parameter values to check for simple problems. Some repositories may + * behave correctly when lightly loaded but incorrectly under production-like + * workloads. If the first analysis is successful, run a sequence of + * increasingly large analyses until you encounter a failure or you reach a + * blob_count of at least 2000, a * max_blob_size of at least 2gb, a * max_total_data_size of at least 1tb, and a * register_operation_count of at least 100. Always * specify a generous timeout, possibly 1h or longer, to allow time - * for each analysis to run to completion. Perform the analyses using a - * multi-node cluster of a similar size to your production cluster so that it - * can detect any problems that only arise when the repository is accessed by - * many nodes at once. + * for each analysis to run to completion. Some repositories may behave + * correctly when accessed by a small number of Elasticsearch nodes but + * incorrectly when accessed concurrently by a production-scale cluster. Perform + * the analyses using a multi-node cluster of a similar size to your production + * cluster so that it can detect any problems that only arise when the + * repository is accessed by many nodes at once. *

    * If the analysis fails, Elasticsearch detected that your repository behaved * unexpectedly. This usually means you are using a third-party storage system * with an incorrect or incompatible implementation of the API it claims to * support. If so, this storage system is not suitable for use as a snapshot - * repository. You will need to work with the supplier of your storage system to - * address the incompatibilities that Elasticsearch detects. + * repository. Repository analysis triggers conditions that occur only rarely + * when taking snapshots in a production system. Snapshotting to unsuitable + * storage may appear to work correctly most of the time despite repository + * analysis failures. However your snapshot data is at risk if you store it in a + * snapshot repository that does not reliably pass repository analysis. You can + * demonstrate that the analysis failure is due to an incompatible storage + * implementation by verifying that Elasticsearch does not detect the same + * problem when analysing the reference implementation of the storage protocol + * you are using. For instance, if you are using storage that offers an API + * which the supplier claims to be compatible with AWS S3, verify that + * repositories in AWS S3 do not fail repository analysis. This allows you to + * demonstrate to your storage supplier that a repository analysis failure must + * only be caused by an incompatibility with AWS S3 and cannot be attributed to + * a problem in Elasticsearch. Please do not report Elasticsearch issues + * involving third-party storage systems unless you can demonstrate that the + * same issue exists when analysing a repository that uses the reference + * implementation of the same storage protocol. You will need to work with the + * supplier of your storage system to address the incompatibilities that + * Elasticsearch detects. *

    * If the analysis is successful, the API returns details of the testing * process, optionally including how long each operation took. You can use this @@ -490,6 +509,8 @@ public GetRepositoryResponse getRepository() throws IOException, ElasticsearchEx *

    * NOTE: This API is intended for exploratory use by humans. You should expect * the request parameters and the response format to vary in future versions. + * The response exposes implementation details of the analysis which may change + * from version to version. *

    * NOTE: Different versions of Elasticsearch may perform different checks for * repository compatibility, with newer versions typically being stricter than @@ -576,12 +597,10 @@ public RepositoryAnalyzeResponse repositoryAnalyze(RepositoryAnalyzeRequest requ } /** - * Analyze a snapshot repository. Analyze the performance characteristics and - * any incorrect behaviour found in a repository. + * Analyze a snapshot repository. *

    - * The response exposes implementation details of the analysis which may change - * from version to version. The response body format is therefore not considered - * stable and may be different in newer versions. + * Performs operations on a snapshot repository in order to check for incorrect + * behaviour. *

    * There are a large number of third-party storage systems available, not all of * which are suitable for use as a snapshot repository by Elasticsearch. Some @@ -594,24 +613,45 @@ public RepositoryAnalyzeResponse repositoryAnalyze(RepositoryAnalyzeRequest requ * The default values for the parameters are deliberately low to reduce the * impact of running an analysis inadvertently and to provide a sensible * starting point for your investigations. Run your first analysis with the - * default parameter values to check for simple problems. If successful, run a - * sequence of increasingly large analyses until you encounter a failure or you - * reach a blob_count of at least 2000, a + * default parameter values to check for simple problems. Some repositories may + * behave correctly when lightly loaded but incorrectly under production-like + * workloads. If the first analysis is successful, run a sequence of + * increasingly large analyses until you encounter a failure or you reach a + * blob_count of at least 2000, a * max_blob_size of at least 2gb, a * max_total_data_size of at least 1tb, and a * register_operation_count of at least 100. Always * specify a generous timeout, possibly 1h or longer, to allow time - * for each analysis to run to completion. Perform the analyses using a - * multi-node cluster of a similar size to your production cluster so that it - * can detect any problems that only arise when the repository is accessed by - * many nodes at once. + * for each analysis to run to completion. Some repositories may behave + * correctly when accessed by a small number of Elasticsearch nodes but + * incorrectly when accessed concurrently by a production-scale cluster. Perform + * the analyses using a multi-node cluster of a similar size to your production + * cluster so that it can detect any problems that only arise when the + * repository is accessed by many nodes at once. *

    * If the analysis fails, Elasticsearch detected that your repository behaved * unexpectedly. This usually means you are using a third-party storage system * with an incorrect or incompatible implementation of the API it claims to * support. If so, this storage system is not suitable for use as a snapshot - * repository. You will need to work with the supplier of your storage system to - * address the incompatibilities that Elasticsearch detects. + * repository. Repository analysis triggers conditions that occur only rarely + * when taking snapshots in a production system. Snapshotting to unsuitable + * storage may appear to work correctly most of the time despite repository + * analysis failures. However your snapshot data is at risk if you store it in a + * snapshot repository that does not reliably pass repository analysis. You can + * demonstrate that the analysis failure is due to an incompatible storage + * implementation by verifying that Elasticsearch does not detect the same + * problem when analysing the reference implementation of the storage protocol + * you are using. For instance, if you are using storage that offers an API + * which the supplier claims to be compatible with AWS S3, verify that + * repositories in AWS S3 do not fail repository analysis. This allows you to + * demonstrate to your storage supplier that a repository analysis failure must + * only be caused by an incompatibility with AWS S3 and cannot be attributed to + * a problem in Elasticsearch. Please do not report Elasticsearch issues + * involving third-party storage systems unless you can demonstrate that the + * same issue exists when analysing a repository that uses the reference + * implementation of the same storage protocol. You will need to work with the + * supplier of your storage system to address the incompatibilities that + * Elasticsearch detects. *

    * If the analysis is successful, the API returns details of the testing * process, optionally including how long each operation took. You can use this @@ -662,6 +702,8 @@ public RepositoryAnalyzeResponse repositoryAnalyze(RepositoryAnalyzeRequest requ *

    * NOTE: This API is intended for exploratory use by humans. You should expect * the request parameters and the response format to vary in future versions. + * The response exposes implementation details of the analysis which may change + * from version to version. *

    * NOTE: Different versions of Elasticsearch may perform different checks for * repository compatibility, with newer versions typically being stricter than diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryAnalyzeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryAnalyzeRequest.java index 2f9e2d81e..402ecc64d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryAnalyzeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryAnalyzeRequest.java @@ -59,12 +59,10 @@ // typedef: snapshot.repository_analyze.Request /** - * Analyze a snapshot repository. Analyze the performance characteristics and - * any incorrect behaviour found in a repository. + * Analyze a snapshot repository. *

    - * The response exposes implementation details of the analysis which may change - * from version to version. The response body format is therefore not considered - * stable and may be different in newer versions. + * Performs operations on a snapshot repository in order to check for incorrect + * behaviour. *

    * There are a large number of third-party storage systems available, not all of * which are suitable for use as a snapshot repository by Elasticsearch. Some @@ -77,24 +75,45 @@ * The default values for the parameters are deliberately low to reduce the * impact of running an analysis inadvertently and to provide a sensible * starting point for your investigations. Run your first analysis with the - * default parameter values to check for simple problems. If successful, run a - * sequence of increasingly large analyses until you encounter a failure or you - * reach a blob_count of at least 2000, a + * default parameter values to check for simple problems. Some repositories may + * behave correctly when lightly loaded but incorrectly under production-like + * workloads. If the first analysis is successful, run a sequence of + * increasingly large analyses until you encounter a failure or you reach a + * blob_count of at least 2000, a * max_blob_size of at least 2gb, a * max_total_data_size of at least 1tb, and a * register_operation_count of at least 100. Always * specify a generous timeout, possibly 1h or longer, to allow time - * for each analysis to run to completion. Perform the analyses using a - * multi-node cluster of a similar size to your production cluster so that it - * can detect any problems that only arise when the repository is accessed by - * many nodes at once. + * for each analysis to run to completion. Some repositories may behave + * correctly when accessed by a small number of Elasticsearch nodes but + * incorrectly when accessed concurrently by a production-scale cluster. Perform + * the analyses using a multi-node cluster of a similar size to your production + * cluster so that it can detect any problems that only arise when the + * repository is accessed by many nodes at once. *

    * If the analysis fails, Elasticsearch detected that your repository behaved * unexpectedly. This usually means you are using a third-party storage system * with an incorrect or incompatible implementation of the API it claims to * support. If so, this storage system is not suitable for use as a snapshot - * repository. You will need to work with the supplier of your storage system to - * address the incompatibilities that Elasticsearch detects. + * repository. Repository analysis triggers conditions that occur only rarely + * when taking snapshots in a production system. Snapshotting to unsuitable + * storage may appear to work correctly most of the time despite repository + * analysis failures. However your snapshot data is at risk if you store it in a + * snapshot repository that does not reliably pass repository analysis. You can + * demonstrate that the analysis failure is due to an incompatible storage + * implementation by verifying that Elasticsearch does not detect the same + * problem when analysing the reference implementation of the storage protocol + * you are using. For instance, if you are using storage that offers an API + * which the supplier claims to be compatible with AWS S3, verify that + * repositories in AWS S3 do not fail repository analysis. This allows you to + * demonstrate to your storage supplier that a repository analysis failure must + * only be caused by an incompatibility with AWS S3 and cannot be attributed to + * a problem in Elasticsearch. Please do not report Elasticsearch issues + * involving third-party storage systems unless you can demonstrate that the + * same issue exists when analysing a repository that uses the reference + * implementation of the same storage protocol. You will need to work with the + * supplier of your storage system to address the incompatibilities that + * Elasticsearch detects. *

    * If the analysis is successful, the API returns details of the testing * process, optionally including how long each operation took. You can use this @@ -145,6 +164,8 @@ *

    * NOTE: This API is intended for exploratory use by humans. You should expect * the request parameters and the response format to vary in future versions. + * The response exposes implementation details of the analysis which may change + * from version to version. *

    * NOTE: Different versions of Elasticsearch may perform different checks for * repository compatibility, with newer versions typically being stricter than diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotShardFailure.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotShardFailure.java index 31817895f..e409acfb0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotShardFailure.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotShardFailure.java @@ -30,6 +30,7 @@ import co.elastic.clients.util.ObjectBuilder; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; +import java.lang.Integer; import java.lang.String; import java.util.Objects; import java.util.function.Function; @@ -67,7 +68,7 @@ public class SnapshotShardFailure implements JsonpSerializable { private final String reason; - private final String shardId; + private final int shardId; private final String indexUuid; @@ -80,7 +81,7 @@ private SnapshotShardFailure(Builder builder) { this.index = ApiTypeHelper.requireNonNull(builder.index, this, "index"); this.nodeId = builder.nodeId; this.reason = ApiTypeHelper.requireNonNull(builder.reason, this, "reason"); - this.shardId = ApiTypeHelper.requireNonNull(builder.shardId, this, "shardId"); + this.shardId = ApiTypeHelper.requireNonNull(builder.shardId, this, "shardId", 0); this.indexUuid = ApiTypeHelper.requireNonNull(builder.indexUuid, this, "indexUuid"); this.status = ApiTypeHelper.requireNonNull(builder.status, this, "status"); @@ -115,7 +116,7 @@ public final String reason() { /** * Required - API name: {@code shard_id} */ - public final String shardId() { + public final int shardId() { return this.shardId; } @@ -187,7 +188,7 @@ public static class Builder extends WithJsonObjectBuilderBase private String reason; - private String shardId; + private Integer shardId; 
private String indexUuid; @@ -220,7 +221,7 @@ public final Builder reason(String value) { /** * Required - API name: {@code shard_id} */ - public final Builder shardId(String value) { + public final Builder shardId(int value) { this.shardId = value; return this; } @@ -272,7 +273,7 @@ protected static void setupSnapshotShardFailureDeserializer(ObjectDeserializer Date: Tue, 3 Jun 2025 16:29:45 +0200 Subject: [PATCH 81/96] using stable conventions (#1017) --- java-client/build.gradle.kts | 5 ++-- .../OpenTelemetryForElasticsearch.java | 23 +++++++++---------- .../OpenTelemetryForElasticsearchTest.java | 16 +++++++------ 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/java-client/build.gradle.kts b/java-client/build.gradle.kts index ca291b7be..8d709daff 100644 --- a/java-client/build.gradle.kts +++ b/java-client/build.gradle.kts @@ -177,7 +177,7 @@ signing { dependencies { val elasticsearchVersion = "9.0.0" val jacksonVersion = "2.18.3" - val openTelemetryVersion = "1.29.0" + val openTelemetryVersion = "1.32.0" // Apache 2.0 // https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/java-rest-low.html @@ -209,8 +209,7 @@ dependencies { // Apache 2.0 // https://github.com/open-telemetry/opentelemetry-java implementation("io.opentelemetry", "opentelemetry-api", openTelemetryVersion) - // Use it once it's stable (see Instrumentation.java). Limited to tests for now. 
- testImplementation("io.opentelemetry", "opentelemetry-semconv", "$openTelemetryVersion-alpha") + implementation("io.opentelemetry.semconv","opentelemetry-semconv", openTelemetryVersion) testImplementation("io.opentelemetry", "opentelemetry-sdk", openTelemetryVersion) // EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 diff --git a/java-client/src/main/java/co/elastic/clients/transport/instrumentation/OpenTelemetryForElasticsearch.java b/java-client/src/main/java/co/elastic/clients/transport/instrumentation/OpenTelemetryForElasticsearch.java index 03d6dcfc6..ec8afb223 100644 --- a/java-client/src/main/java/co/elastic/clients/transport/instrumentation/OpenTelemetryForElasticsearch.java +++ b/java-client/src/main/java/co/elastic/clients/transport/instrumentation/OpenTelemetryForElasticsearch.java @@ -31,6 +31,9 @@ import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.api.trace.Tracer; import io.opentelemetry.context.Scope; +import io.opentelemetry.semconv.HttpAttributes; +import io.opentelemetry.semconv.ServerAttributes; +import io.opentelemetry.semconv.UrlAttributes; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -69,18 +72,14 @@ public class OpenTelemetryForElasticsearch implements Instrumentation { "search" )); - private static final AttributeKey ATTR_DB_SYSTEM = AttributeKey.stringKey("db.system"); - private static final AttributeKey ATTR_DB_OPERATION = AttributeKey.stringKey("db.operation"); - private static final AttributeKey ATTR_DB_STATEMENT = AttributeKey.stringKey("db.statement"); - // Use Semantic Convention keys once opentelemetry-semconv is stable - //private static final AttributeKey ATTR_DB_SYSTEM = SemanticAttributes.DB_SYSTEM; - //private static final AttributeKey ATTR_DB_OPERATION = SemanticAttributes.DB_OPERATION; - //private static final AttributeKey ATTR_DB_STATEMENT = SemanticAttributes.DB_STATEMENT; + private static final AttributeKey ATTR_DB_SYSTEM = 
AttributeKey.stringKey("db.system.name"); + private static final AttributeKey ATTR_DB_OPERATION = AttributeKey.stringKey("db.operation.name"); + private static final AttributeKey ATTR_DB_QUERY = AttributeKey.stringKey("db.query.text"); - private static final AttributeKey ATTR_HTTP_REQUEST_METHOD = AttributeKey.stringKey("http.request.method"); - private static final AttributeKey ATTR_URL_FULL = AttributeKey.stringKey("url.full"); - private static final AttributeKey ATTR_SERVER_ADDRESS = AttributeKey.stringKey("server.address"); - private static final AttributeKey ATTR_SERVER_PORT = AttributeKey.longKey("server.port"); + private static final AttributeKey ATTR_HTTP_REQUEST_METHOD = HttpAttributes.HTTP_REQUEST_METHOD; + private static final AttributeKey ATTR_URL_FULL = UrlAttributes.URL_FULL; + private static final AttributeKey ATTR_SERVER_ADDRESS = ServerAttributes.SERVER_ADDRESS; + private static final AttributeKey ATTR_SERVER_PORT = ServerAttributes.SERVER_PORT; // Caching attributes keys to avoid unnecessary memory allocation private static final Map> attributesKeyCache = new ConcurrentHashMap<>(); @@ -222,7 +221,7 @@ public void beforeSendingHttpRequest(TransportHttpClient.Request httpRequest, Tr sb.append(StandardCharsets.UTF_8.decode(buf)); buf.reset(); } - span.setAttribute(ATTR_DB_STATEMENT, sb.toString()); + span.setAttribute(ATTR_DB_QUERY, sb.toString()); } } catch (Exception e) { logger.debug("Failed reading HTTP body content for an OpenTelemetry span.", e); diff --git a/java-client/src/test/java/co/elastic/clients/transport/instrumentation/OpenTelemetryForElasticsearchTest.java b/java-client/src/test/java/co/elastic/clients/transport/instrumentation/OpenTelemetryForElasticsearchTest.java index 66879470a..224fb2d56 100644 --- a/java-client/src/test/java/co/elastic/clients/transport/instrumentation/OpenTelemetryForElasticsearchTest.java +++ b/java-client/src/test/java/co/elastic/clients/transport/instrumentation/OpenTelemetryForElasticsearchTest.java @@ 
-38,8 +38,6 @@ import io.opentelemetry.sdk.trace.data.SpanData; import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; import io.opentelemetry.sdk.trace.export.SpanExporter; -import io.opentelemetry.semconv.resource.attributes.ResourceAttributes; -import io.opentelemetry.semconv.trace.attributes.SemanticAttributes; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; @@ -57,6 +55,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import static io.opentelemetry.semconv.ServiceAttributes.SERVICE_NAME; + public class OpenTelemetryForElasticsearchTest { private static final String INDEX = "test-index"; private static final String DOC_ID = "1234567"; @@ -100,7 +100,9 @@ public class OpenTelemetryForElasticsearchTest { " ]\n" + " }\n" + "}"; - public static final String DB_OPERATION = "db.operation"; + public static final String DB_SYSTEM = "db.system.name"; + public static final String DB_OPERATION = "db.operation.name"; + public static final String DB_QUERY = "db.query.text"; public static final String URL_FULL = "url.full"; public static final String SERVER_ADDRESS = "server.address"; public static final String SERVER_PORT = "server.port"; @@ -162,7 +164,7 @@ private static void setupHttpServer() throws IOException { private static void setupOTel() { Resource resource = Resource.getDefault() - .merge(Resource.create(Attributes.of(ResourceAttributes.SERVICE_NAME, "es-api-test"))); + .merge(Resource.create(Attributes.of(SERVICE_NAME, "es-api-test"))); spanExporter = new MockSpanExporter(); @@ -189,7 +191,7 @@ public void testGetRequest() throws IOException, InterruptedException { Assertions.assertEquals("get", span.getName()); Assertions.assertEquals("get", span.getAttributes().get(AttributeKey.stringKey(DB_OPERATION))); Assertions.assertEquals("GET", span.getAttributes().get(AttributeKey.stringKey(HTTP_REQUEST_METHOD))); - 
Assertions.assertEquals("elasticsearch", span.getAttributes().get(SemanticAttributes.DB_SYSTEM)); + Assertions.assertEquals("elasticsearch", span.getAttributes().get(AttributeKey.stringKey(DB_SYSTEM))); String url = "http://" + httpServer.getAddress().getHostString() + ":" + httpServer.getAddress().getPort() + "/" + INDEX + "/_doc/" + DOC_ID + "?refresh=true"; @@ -214,7 +216,7 @@ public void testSearchRequest() throws IOException, InterruptedException { Assertions.assertEquals(spanExporter.getSpans().size(), 1); SpanData span = spanExporter.getSpans().get(0); Assertions.assertEquals("search", span.getName()); - Assertions.assertEquals(queryAsString, span.getAttributes().get(SemanticAttributes.DB_STATEMENT)); + Assertions.assertEquals(queryAsString, span.getAttributes().get(AttributeKey.stringKey(DB_QUERY))); } @Test @@ -228,7 +230,7 @@ public void testAsyncSearchRequest() throws IOException, InterruptedException, T Assertions.assertEquals("search", span.getName()); // We're not capturing bodies by default - Assertions.assertNull(span.getAttributes().get(SemanticAttributes.DB_STATEMENT)); + Assertions.assertNull(span.getAttributes().get(AttributeKey.stringKey(DB_QUERY))); } private static class MockSpanExporter implements SpanExporter { From 76ba0f428de3e2ad77ff432e59b4d979ed4603cf Mon Sep 17 00:00:00 2001 From: Laura Trotta <153528055+l-trotta@users.noreply.github.com> Date: Tue, 3 Jun 2025 16:31:03 +0200 Subject: [PATCH 82/96] Added more setters in Rest5Builder (#1019) * added more configs in rest5builder * cleanup --- .../low_level/Rest5ClientBuilder.java | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Rest5ClientBuilder.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Rest5ClientBuilder.java index 4df40f375..b4d5f573e 100644 --- 
a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Rest5ClientBuilder.java +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Rest5ClientBuilder.java @@ -27,7 +27,9 @@ import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.client5.http.routing.HttpRoutePlanner; import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; import org.apache.hc.core5.http.nio.ssl.BasicClientTlsStrategy; import org.apache.hc.core5.util.Timeout; import org.apache.hc.core5.util.VersionInfo; @@ -35,6 +37,7 @@ import javax.net.ssl.SSLContext; import java.io.IOException; import java.io.InputStream; +import java.net.ProxySelector; import java.security.NoSuchAlgorithmException; import java.util.List; import java.util.Locale; @@ -75,6 +78,9 @@ public final class Rest5ClientBuilder { private Header[] defaultHeaders = EMPTY_HEADERS; private Rest5Client.FailureListener failureListener; private SSLContext sslContext; + private HttpHost proxy; + private ProxySelector proxySelector; + private HttpRoutePlanner routePlanner; private String pathPrefix; private NodeSelector nodeSelector = NodeSelector.ANY; private boolean strictDeprecationMode = false; @@ -180,6 +186,24 @@ public Rest5ClientBuilder setSSLContext(SSLContext sslContext) { return this; } + public Rest5ClientBuilder setProxy(HttpHost proxy) { + Objects.requireNonNull(proxy, "proxy must not be null"); + this.proxy = proxy; + return this; + } + + public Rest5ClientBuilder setProxySelector(ProxySelector proxySelector) { + Objects.requireNonNull(proxySelector, "proxy selector must not be null"); + this.proxySelector = proxySelector; + return this; + } + + public Rest5ClientBuilder setRoutePlanner(HttpRoutePlanner routePlanner) { + 
Objects.requireNonNull(routePlanner, "route planner must not be null"); + this.routePlanner = routePlanner; + return this; + } + /** * Sets the default request headers, which will be sent along with each request. *

    @@ -374,6 +398,16 @@ private CloseableHttpAsyncClient createHttpClient() { .setTargetAuthenticationStrategy(new DefaultAuthenticationStrategy()) .setThreadFactory(new RestClientThreadFactory()); + if (this.proxy != null) { + httpClientBuilder.setProxy(this.proxy); + } + if (this.proxySelector != null) { + httpClientBuilder.setProxySelector(this.proxySelector); + } + if (this.routePlanner != null) { + httpClientBuilder.setRoutePlanner(this.routePlanner); + } + return httpClientBuilder.build(); } catch (NoSuchAlgorithmException e) { throw new IllegalStateException("could not create the default ssl context", e); From f4fdd50614e8d1e8aa1484a748d69d9cba3b93ea Mon Sep 17 00:00:00 2001 From: Laura Trotta Date: Fri, 6 Jun 2025 17:42:38 +0200 Subject: [PATCH 83/96] fix error codes in sniff test --- .../low_level/sniffer/ElasticsearchNodesSnifferTests.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSnifferTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSnifferTests.java index a7694eac2..dd6294be9 100644 --- a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSnifferTests.java +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSnifferTests.java @@ -288,8 +288,7 @@ private static SniffResponse buildSniffResponse(ElasticsearchNodesSniffer.Scheme } List roles = Arrays.asList( - new String[] { "master", "data", "ingest", "data_content", "data_hot", "data_warm", "data_cold", "data_frozen" } - ); + "master", "data", "ingest", "data_content", "data_hot", "data_warm", "data_cold", "data_frozen"); Collections.shuffle(roles, getRandom()); generator.writeArrayFieldStart("roles"); for (String role : roles) { @@ -373,6 +372,6 @@ static SniffResponse buildResponse(String 
nodesInfoBody, List nodes) { } private static int randomErrorResponseCode() { - return randomIntBetween(400, 599); + return randomIntBetween(500, 599); } } From cc82ca679df2df504ccff3e7c0fea693f470aec4 Mon Sep 17 00:00:00 2001 From: Laura Trotta <153528055+l-trotta@users.noreply.github.com> Date: Wed, 11 Jun 2025 11:34:59 +0200 Subject: [PATCH 84/96] fix (#1022) --- CONTRIBUTING.md | 2 +- docs/reference/setup/installation.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 30a39b439..70f6a0ebe 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,7 +13,7 @@ before we can accept pull requests from you. ### Prerequisites -You need at least Java 11 to build the project, even though the code targets Java 8. The project is built with `Gradle`. You don't have to install it, and can use the provided `gradlew` that will install the required version if needed. +You need at least Java 17 to build the project. The project is built with `Gradle`. You don't have to install it, and can use the provided `gradlew` that will install the required version if needed. Docker is used for some tests to spawn an Elasticsearch server. diff --git a/docs/reference/setup/installation.md b/docs/reference/setup/installation.md index a535d919e..34d30658a 100644 --- a/docs/reference/setup/installation.md +++ b/docs/reference/setup/installation.md @@ -7,7 +7,7 @@ mapped_pages: Requirements: -* Java 8 or later. +* Java 17 or later. * A JSON object mapping library to allow seamless integration of your application classes with the Elasticsearch API. The Java client has support for [Jackson](https://github.com/FasterXML/jackson) or a [JSON-B](https://github.com/eclipse-ee4j/jsonb-api) library like [Eclipse Yasson](https://github.com/eclipse-ee4j/yasson). Releases are hosted on [Maven Central](https://search.maven.org/search?q=g:co.elastic.clients). 
If you are looking for a SNAPSHOT version, the Elastic Maven Snapshot repository is available at [https://snapshots.elastic.co/maven/](https://snapshots.elastic.co/maven/). From fc2acb38afd5dcb4b42c0c2a90a3c8d5f8238536 Mon Sep 17 00:00:00 2001 From: Marci W <333176+marciw@users.noreply.github.com> Date: Thu, 19 Jun 2025 03:40:49 -0400 Subject: [PATCH 85/96] [DOCS] Add wayfinding for shared GitHub and support links (take 2) (#1026) * Update index.md * Add wayfinding for shared GitHub and support links * Update release-highlights.md --- docs/reference/release-highlights.md | 29 ++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/docs/reference/release-highlights.md b/docs/reference/release-highlights.md index ad8ef15eb..8ead164ed 100644 --- a/docs/reference/release-highlights.md +++ b/docs/reference/release-highlights.md @@ -5,10 +5,35 @@ mapped_pages: # Release highlights [release-highlights] -These are the important new features and changes in minor releases. Every release also updates the Java API Client to the latest [API specification](https://github.com/elastic/elasticsearch-specification). This includes new APIs and bug fixes in the specification of existing APIs. +These are the important new features and changes in minor releases. Every release also updates the Java API Client to the latest [API specification](https://github.com/elastic/elasticsearch-specification). This includes new APIs, as well as bug fixes in the specification of existing APIs. -For a list of detailed changes, including bug fixes, please see the [GitHub project release notes](https://github.com/elastic/elasticsearch-java/releases). +For a list of detailed changes, including bug fixes, see the [GitHub project release notes](https://github.com/elastic/elasticsearch-java/releases). 
## 9.0.0 [release-highlights-900] [Release notes](/release-notes/9-0-0.md) + +## Earlier versions + +To view release notes for earlier versions, use the version dropdown in the top right corner of this page. + +% To facilitate GitHub and community links, use the anchor pattern _version_X_X +% List up to 5 most recent releases + +### Recent releases + +#### 8.18 [_version_8_18] + +[Release notes](https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/8.18/release-highlights.html) + +#### 8.17 [_version_8_17] + +[Release notes](https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/8.17/release-highlights.html) + +#### 8.16 [_version_8_16] + +[Release notes](https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/8.16/release-highlights.html) + +#### 8.15 [_version_8_15] + +[Release notes](https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/8.15/release-highlights.html) \ No newline at end of file From f94a9002a53f04d8deeebb663c438447f98fe4fd Mon Sep 17 00:00:00 2001 From: Laura Trotta Date: Tue, 24 Jun 2025 10:19:39 +0200 Subject: [PATCH 86/96] [codegen] update to latest spec --- .../ElasticsearchAsyncClient.java | 428 +------------ .../elasticsearch/ElasticsearchClient.java | 428 +------------ .../elasticsearch/_types/InnerRetriever.java | 216 +++++++ .../elasticsearch/_types/LinearRetriever.java | 210 +++++++ .../elasticsearch/_types/PinnedRetriever.java | 294 +++++++++ .../_types/RescorerRetriever.java | 239 ++++++++ .../elasticsearch/_types/Retriever.java | 91 +++ .../elasticsearch/_types/RetrieverBase.java | 34 + .../_types/RetrieverBuilders.java | 54 ++ .../elasticsearch/_types/ScoreNormalizer.java | 68 ++ .../_types/SpecifiedDocument.java | 182 ++++++ .../aggregations/BoxplotAggregation.java | 40 ++ .../MedianAbsoluteDeviationAggregation.java | 40 ++ .../_types/aggregations/TDigest.java | 40 ++ .../aggregations/TDigestExecutionHint.java | 67 ++ .../_types/query_dsl/GeoGridQuery.java | 24 +- 
.../query_dsl/GeoGridQueryBuilders.java | 2 +- .../_types/query_dsl/Intervals.java | 60 ++ .../_types/query_dsl/IntervalsBuilders.java | 36 ++ .../_types/query_dsl/IntervalsQuery.java | 61 ++ .../query_dsl/IntervalsQueryBuilders.java | 36 ++ .../_types/query_dsl/IntervalsRange.java | 348 +++++++++++ .../_types/query_dsl/IntervalsRegexp.java | 245 ++++++++ .../_types/query_dsl/RuleQuery.java | 38 +- .../_types/query_dsl/WeightedTokensQuery.java | 38 +- .../cluster/AllocationExplainRequest.java | 3 +- .../cluster/ComponentTemplateSummary.java | 38 ++ .../ElasticsearchClusterAsyncClient.java | 24 +- .../cluster/ElasticsearchClusterClient.java | 24 +- .../cluster/GetClusterSettingsRequest.java | 5 +- .../cluster/GetClusterSettingsResponse.java | 30 +- .../cluster/PutClusterSettingsRequest.java | 12 + .../elasticsearch/core/BulkRequest.java | 4 + .../elasticsearch/core/GetRequest.java | 10 +- .../core/OpenPointInTimeRequest.java | 12 +- .../elasticsearch/core/ReindexRequest.java | 205 +------ .../core/SearchShardsRequest.java | 12 +- .../core/SearchTemplateRequest.java | 12 +- .../core/TermvectorsRequest.java | 3 +- .../core/UpdateByQueryRequest.java | 12 +- .../elasticsearch/core/search/Highlight.java | 46 +- .../core/search/SourceFilter.java | 52 ++ .../elasticsearch/doc-files/api-spec.html | 579 ++++++++++-------- .../LifecycleExplainManaged.java | 23 + .../indices/ClearCacheRequest.java | 12 +- .../indices/CloseIndexRequest.java | 12 +- .../elasticsearch/indices/DataStream.java | 40 ++ .../indices/DataStreamFailureStore.java | 222 +++++++ .../DataStreamFailureStoreTemplate.java | 222 +++++++ .../indices/DataStreamOptions.java | 177 ++++++ .../indices/DataStreamOptionsTemplate.java | 171 ++++++ .../DeleteDataStreamOptionsRequest.java | 330 ++++++++++ .../DeleteDataStreamOptionsResponse.java | 110 ++++ .../indices/DeleteIndexRequest.java | 12 +- .../ElasticsearchIndicesAsyncClient.java | 453 +++++++++++--- .../indices/ElasticsearchIndicesClient.java | 460 
+++++++++++--- .../indices/ExistsAliasRequest.java | 12 +- .../elasticsearch/indices/ExistsRequest.java | 12 +- .../indices/FailureStoreLifecycle.java | 225 +++++++ .../FailureStoreLifecycleTemplate.java | 225 +++++++ .../elasticsearch/indices/FlushRequest.java | 12 +- .../indices/GetAliasRequest.java | 12 +- .../indices/GetDataLifecycleRequest.java | 12 +- .../indices/GetDataStreamOptionsRequest.java | 297 +++++++++ .../indices/GetDataStreamOptionsResponse.java | 189 ++++++ .../indices/GetDataStreamSettingsRequest.java | 247 ++++++++ .../GetDataStreamSettingsResponse.java | 188 ++++++ .../indices/GetFieldMappingRequest.java | 12 +- .../indices/GetMappingRequest.java | 12 +- .../elasticsearch/indices/IndexSettings.java | 9 +- .../indices/IndexTemplateSummary.java | 37 ++ .../elasticsearch/indices/OpenRequest.java | 12 +- .../indices/PutDataLifecycleRequest.java | 12 +- .../indices/PutDataStreamOptionsRequest.java | 414 +++++++++++++ .../indices/PutDataStreamOptionsResponse.java | 109 ++++ .../indices/PutDataStreamSettingsRequest.java | 383 ++++++++++++ .../PutDataStreamSettingsResponse.java | 189 ++++++ .../indices/PutIndicesSettingsRequest.java | 55 +- .../indices/PutMappingRequest.java | 59 +- .../elasticsearch/indices/RefreshRequest.java | 12 +- .../indices/ResolveClusterRequest.java | 27 +- .../indices/ResolveIndexRequest.java | 12 +- .../indices/SegmentsRequest.java | 12 +- .../indices/ValidateQueryRequest.java | 12 +- .../DataStreamWithOptions.java | 194 ++++++ .../DataStreamSettings.java | 241 ++++++++ .../DataStreamSettingsError.java | 187 ++++++ .../IndexSettingResults.java | 307 ++++++++++ .../UpdatedDataStreamSettings.java | 353 +++++++++++ .../ChatCompletionUnifiedRequest.java | 13 +- .../inference/CohereEmbeddingType.java | 4 + .../inference/CohereServiceSettings.java | 18 +- .../ElasticsearchInferenceAsyncClient.java | 198 ++++-- .../ElasticsearchInferenceClient.java | 198 ++++-- .../inference/GoogleVertexAITaskType.java | 4 + 
.../inference/HuggingFaceServiceSettings.java | 79 ++- .../inference/HuggingFaceTaskSettings.java | 202 ++++++ .../inference/HuggingFaceTaskType.java | 6 + .../InferenceEndpointInfoAlibabaCloudAI.java | 141 +++++ .../InferenceEndpointInfoAmazonBedrock.java | 141 +++++ .../InferenceEndpointInfoAnthropic.java | 141 +++++ .../InferenceEndpointInfoAzureAIStudio.java | 141 +++++ .../InferenceEndpointInfoAzureOpenAI.java | 141 +++++ .../InferenceEndpointInfoCohere.java | 141 +++++ .../inference/InferenceEndpointInfoELSER.java | 141 +++++ .../InferenceEndpointInfoElasticsearch.java | 141 +++++ .../InferenceEndpointInfoGoogleAIStudio.java | 141 +++++ .../InferenceEndpointInfoGoogleVertexAI.java | 141 +++++ .../InferenceEndpointInfoHuggingFace.java | 141 +++++ .../InferenceEndpointInfoMistral.java | 141 +++++ .../InferenceEndpointInfoOpenAI.java | 141 +++++ .../InferenceEndpointInfoVoyageAI.java | 141 +++++ .../InferenceEndpointInfoWatsonx.java | 141 +++++ .../elasticsearch/inference/Message.java | 163 ++++- .../inference/MistralServiceSettings.java | 4 +- .../inference/MistralTaskType.java | 4 + .../inference/PutAlibabacloudResponse.java | 6 +- .../inference/PutAmazonbedrockRequest.java | 2 +- .../inference/PutAmazonbedrockResponse.java | 6 +- .../inference/PutAnthropicResponse.java | 6 +- .../inference/PutAzureaistudioResponse.java | 6 +- .../inference/PutAzureopenaiResponse.java | 6 +- .../inference/PutCohereResponse.java | 6 +- .../inference/PutElasticsearchResponse.java | 6 +- .../inference/PutElserResponse.java | 6 +- .../inference/PutGoogleaistudioResponse.java | 6 +- .../inference/PutGooglevertexaiResponse.java | 6 +- .../inference/PutHuggingFaceRequest.java | 92 ++- .../inference/PutHuggingFaceResponse.java | 6 +- .../inference/PutMistralRequest.java | 8 +- .../inference/PutMistralResponse.java | 6 +- .../inference/PutOpenaiResponse.java | 6 +- .../elasticsearch/inference/PutRequest.java | 39 +- .../inference/PutVoyageaiResponse.java | 6 +- 
.../inference/PutWatsonxRequest.java | 6 +- .../inference/PutWatsonxResponse.java | 6 +- .../inference/RateLimitSetting.java | 56 +- .../inference/RequestChatCompletion.java | 169 ++++- .../inference/TaskTypeAlibabaCloudAI.java | 71 +++ .../inference/TaskTypeAmazonBedrock.java | 67 ++ .../inference/TaskTypeAnthropic.java | 65 ++ .../inference/TaskTypeAzureAIStudio.java | 67 ++ .../inference/TaskTypeAzureOpenAI.java | 67 ++ .../inference/TaskTypeCohere.java | 68 ++ .../inference/TaskTypeELSER.java | 64 ++ .../inference/TaskTypeElasticsearch.java | 69 +++ .../inference/TaskTypeGoogleAIStudio.java | 67 ++ .../inference/TaskTypeGoogleVertexAI.java | 67 ++ .../inference/TaskTypeHuggingFace.java | 71 +++ .../inference/TaskTypeMistral.java | 69 +++ .../inference/TaskTypeOpenAI.java | 68 ++ .../inference/TaskTypeVoyageAI.java | 67 ++ .../inference/TaskTypeWatsonx.java | 69 +++ .../inference/WatsonxServiceSettings.java | 6 +- .../inference/WatsonxTaskType.java | 4 + .../elasticsearch/ml/PutJobRequest.java | 39 +- .../ml/UpdateDatafeedRequest.java | 39 +- .../ElasticsearchSecurityAsyncClient.java | 8 + .../security/ElasticsearchSecurityClient.java | 8 + .../UpdateCrossClusterApiKeyRequest.java | 4 + .../elasticsearch/slm/GetStatsResponse.java | 26 +- .../slm/SnapshotPolicyStats.java | 253 ++++++++ .../snapshot/GetSnapshotRequest.java | 46 ++ .../elasticsearch/snapshot/SnapshotState.java | 87 +++ .../watcher/AckWatchRequest.java | 4 +- .../ElasticsearchWatcherAsyncClient.java | 17 +- .../watcher/ElasticsearchWatcherClient.java | 17 +- .../watcher/ExecuteWatchRequest.java | 3 +- 168 files changed, 13727 insertions(+), 2057 deletions(-) create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/_types/InnerRetriever.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/_types/LinearRetriever.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/_types/PinnedRetriever.java create mode 100644 
java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RescorerRetriever.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ScoreNormalizer.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/_types/SpecifiedDocument.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/TDigestExecutionHint.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsRange.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsRegexp.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamFailureStore.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamFailureStoreTemplate.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamOptions.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamOptionsTemplate.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteDataStreamOptionsRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteDataStreamOptionsResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FailureStoreLifecycle.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FailureStoreLifecycleTemplate.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamOptionsRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamOptionsResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamSettingsRequest.java create mode 100644 
java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamSettingsResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutDataStreamOptionsRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutDataStreamOptionsResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutDataStreamSettingsRequest.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutDataStreamSettingsResponse.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/get_data_stream_options/DataStreamWithOptions.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/get_data_stream_settings/DataStreamSettings.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/put_data_stream_settings/DataStreamSettingsError.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/put_data_stream_settings/IndexSettingResults.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/indices/put_data_stream_settings/UpdatedDataStreamSettings.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/HuggingFaceTaskSettings.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoAlibabaCloudAI.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoAmazonBedrock.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoAnthropic.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoAzureAIStudio.java create mode 100644 
java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoAzureOpenAI.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoCohere.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoELSER.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoElasticsearch.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoGoogleAIStudio.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoGoogleVertexAI.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoHuggingFace.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoMistral.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoOpenAI.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoVoyageAI.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/InferenceEndpointInfoWatsonx.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeAlibabaCloudAI.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeAmazonBedrock.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeAnthropic.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeAzureAIStudio.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeAzureOpenAI.java create mode 100644 
java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeCohere.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeELSER.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeElasticsearch.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeGoogleAIStudio.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeGoogleVertexAI.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeHuggingFace.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeMistral.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeOpenAI.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeVoyageAI.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/inference/TaskTypeWatsonx.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/slm/SnapshotPolicyStats.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotState.java diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java index ba9e9f78f..74fcb67cf 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java @@ -533,6 +533,10 @@ public ElasticsearchXpackAsyncClient xpack() { * five shards. The request will only wait for those three shards to refresh. * The other two shards that make up the index do not participate in the * _bulk request at all. + *

    + * You might want to disable the refresh interval temporarily to improve + * indexing throughput for large bulk requests. Refer to the linked + * documentation for step-by-step instructions using the index settings API. * * @see Documentation @@ -707,6 +711,10 @@ public CompletableFuture bulk(BulkRequest request) { * five shards. The request will only wait for those three shards to refresh. * The other two shards that make up the index do not participate in the * _bulk request at all. + *

    + * You might want to disable the refresh interval temporarily to improve + * indexing throughput for large bulk requests. Refer to the linked + * documentation for step-by-step instructions using the index settings API. * * @param fn * a function that initializes a builder to create the @@ -881,6 +889,10 @@ public final CompletableFuture bulk(Function_bulk request at all. + *

    + * You might want to disable the refresh interval temporarily to improve + * indexing throughput for large bulk requests. Refer to the linked + * documentation for step-by-step instructions using the index settings API. * * @see Documentation @@ -4172,210 +4184,7 @@ public final CompletableFuture rankEval( * until it has successfully indexed max_docs documents into the * target or it has gone through every document in the source query. *

    - * NOTE: The reindex API makes no effort to handle ID collisions. The last - * document written will "win" but the order isn't usually predictable - * so it is not a good idea to rely on this behavior. Instead, make sure that - * IDs are unique by using a script. - *

    - * Running reindex asynchronously - *

    - * If the request contains wait_for_completion=false, Elasticsearch - * performs some preflight checks, launches the request, and returns a task you - * can use to cancel or get the status of the task. Elasticsearch creates a - * record of this task as a document at _tasks/<task_id>. - *

    - * Reindex from multiple sources - *

    - * If you have many sources to reindex it is generally better to reindex them - * one at a time rather than using a glob pattern to pick up multiple sources. - * That way you can resume the process if there are any errors by removing the - * partially completed source and starting over. It also makes parallelizing the - * process fairly simple: split the list of sources to reindex and run each list - * in parallel. - *

    - * For example, you can use a bash script like this: - * - *

    -	 * for index in i1 i2 i3 i4 i5; do
    -	 *   curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
    -	 *     "source": {
    -	 *       "index": "'$index'"
    -	 *     },
    -	 *     "dest": {
    -	 *       "index": "'$index'-reindexed"
    -	 *     }
    -	 *   }'
    -	 * done
    -	 * 
    -	 * 
    - *

    - * Throttling - *

    - * Set requests_per_second to any positive decimal number - * (1.4, 6, 1000, for example) to - * throttle the rate at which reindex issues batches of index operations. - * Requests are throttled by padding each batch with a wait time. To turn off - * throttling, set requests_per_second to -1. - *

    - * The throttling is done by waiting between batches so that the scroll that - * reindex uses internally can be given a timeout that takes into account the - * padding. The padding time is the difference between the batch size divided by - * the requests_per_second and the time spent writing. By default - * the batch size is 1000, so if requests_per_second - * is set to 500: - * - *

    -	 * target_time = 1000 / 500 per second = 2 seconds
    -	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
    -	 * 
    -	 * 
    - *

    - * Since the batch is issued as a single bulk request, large batch sizes cause - * Elasticsearch to create many requests and then wait for a while before - * starting the next set. This is "bursty" instead of - * "smooth". - *

    - * Slicing - *

    - * Reindex supports sliced scroll to parallelize the reindexing process. This - * parallelization can improve efficiency and provide a convenient way to break - * the request down into smaller parts. - *

    - * NOTE: Reindexing from remote clusters does not support manual or automatic - * slicing. - *

    - * You can slice a reindex request manually by providing a slice ID and total - * number of slices to each request. You can also let reindex automatically - * parallelize by using sliced scroll to slice on _id. The - * slices parameter specifies the number of slices to use. - *

    - * Adding slices to the reindex request just automates the manual - * process, creating sub-requests which means it has some quirks: - *

      - *
    • You can see these requests in the tasks API. These sub-requests are - * "child" tasks of the task for the request with slices.
    • - *
    • Fetching the status of the task for the request with slices - * only contains the status of completed slices.
    • - *
    • These sub-requests are individually addressable for things like - * cancellation and rethrottling.
    • - *
    • Rethrottling the request with slices will rethrottle the - * unfinished sub-request proportionally.
    • - *
    • Canceling the request with slices will cancel each - * sub-request.
    • - *
    • Due to the nature of slices, each sub-request won't get a - * perfectly even portion of the documents. All documents will be addressed, but - * some slices may be larger than others. Expect larger slices to have a more - * even distribution.
    • - *
    • Parameters like requests_per_second and - * max_docs on a request with slices are distributed - * proportionally to each sub-request. Combine that with the previous point - * about distribution being uneven and you should conclude that using - * max_docs with slices might not result in exactly - * max_docs documents being reindexed.
    • - *
    • Each sub-request gets a slightly different snapshot of the source, though - * these are all taken at approximately the same time.
    • - *
    - *

    - * If slicing automatically, setting slices to auto - * will choose a reasonable number for most indices. If slicing manually or - * otherwise tuning automatic slicing, use the following guidelines. - *

    - * Query performance is most efficient when the number of slices is equal to the - * number of shards in the index. If that number is large (for example, - * 500), choose a lower number as too many slices will hurt - * performance. Setting slices higher than the number of shards generally does - * not improve efficiency and adds overhead. - *

    - * Indexing performance scales linearly across available resources with the - * number of slices. - *

    - * Whether query or indexing performance dominates the runtime depends on the - * documents being reindexed and cluster resources. - *

    - * Modify documents during reindexing - *

    - * Like _update_by_query, reindex operations support a script that - * modifies the document. Unlike _update_by_query, the script is - * allowed to modify the document's metadata. - *

    - * Just as in _update_by_query, you can set ctx.op to - * change the operation that is run on the destination. For example, set - * ctx.op to noop if your script decides that the - * document doesn’t have to be indexed in the destination. This "no - * operation" will be reported in the noop counter in the - * response body. Set ctx.op to delete if your script - * decides that the document must be deleted from the destination. The deletion - * will be reported in the deleted counter in the response body. - * Setting ctx.op to anything else will return an error, as will - * setting any other field in ctx. - *

    - * Think of the possibilities! Just be careful; you are able to change: - *

      - *
    • _id
    • - *
    • _index
    • - *
    • _version
    • - *
    • _routing
    • - *
    - *

    - * Setting _version to null or clearing it from the - * ctx map is just like not sending the version in an indexing - * request. It will cause the document to be overwritten in the destination - * regardless of the version on the target or the version type you use in the - * reindex API. - *

    - * Reindex from remote - *

    - * Reindex supports reindexing from a remote Elasticsearch cluster. The - * host parameter must contain a scheme, host, port, and optional - * path. The username and password parameters are - * optional and when they are present the reindex operation will connect to the - * remote Elasticsearch node using basic authentication. Be sure to use HTTPS - * when using basic authentication or the password will be sent in plain text. - * There are a range of settings available to configure the behavior of the - * HTTPS connection. - *

    - * When using Elastic Cloud, it is also possible to authenticate against the - * remote cluster through the use of a valid API key. Remote hosts must be - * explicitly allowed with the reindex.remote.whitelist setting. It - * can be set to a comma delimited list of allowed remote host and port - * combinations. Scheme is ignored; only the host and port are used. For - * example: - * - *

    -	 * reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"]
    -	 * 
    -	 * 
    - *

    - * The list of allowed hosts must be configured on any nodes that will - * coordinate the reindex. This feature should work with remote clusters of any - * version of Elasticsearch. This should enable you to upgrade from any version - * of Elasticsearch to the current version by reindexing from a cluster of the - * old version. - *

    - * WARNING: Elasticsearch does not support forward compatibility across major - * versions. For example, you cannot reindex from a 7.x cluster into a 6.x - * cluster. - *

    - * To enable queries sent to older versions of Elasticsearch, the - * query parameter is sent directly to the remote host without - * validation or modification. - *

    - * NOTE: Reindexing from remote clusters does not support manual or automatic - * slicing. - *

    - * Reindexing from a remote server uses an on-heap buffer that defaults to a - * maximum size of 100mb. If the remote index includes very large documents - * you'll need to use a smaller batch size. It is also possible to set the - * socket read timeout on the remote connection with the - * socket_timeout field and the connection timeout with the - * connect_timeout field. Both default to 30 seconds. - *

    - * Configuring SSL parameters - *

    - * Reindex from remote supports configurable SSL settings. These must be - * specified in the elasticsearch.yml file, with the exception of - * the secure settings, which you add in the Elasticsearch keystore. It is not - * possible to configure SSL in the body of the reindex request. + * Refer to the linked documentation for examples of how to reindex documents. * * @see Documentation @@ -4454,210 +4263,7 @@ public CompletableFuture reindex(ReindexRequest request) { * until it has successfully indexed max_docs documents into the * target or it has gone through every document in the source query. *

    - * NOTE: The reindex API makes no effort to handle ID collisions. The last - * document written will "win" but the order isn't usually predictable - * so it is not a good idea to rely on this behavior. Instead, make sure that - * IDs are unique by using a script. - *

    - * Running reindex asynchronously - *

    - * If the request contains wait_for_completion=false, Elasticsearch - * performs some preflight checks, launches the request, and returns a task you - * can use to cancel or get the status of the task. Elasticsearch creates a - * record of this task as a document at _tasks/<task_id>. - *

    - * Reindex from multiple sources - *

    - * If you have many sources to reindex it is generally better to reindex them - * one at a time rather than using a glob pattern to pick up multiple sources. - * That way you can resume the process if there are any errors by removing the - * partially completed source and starting over. It also makes parallelizing the - * process fairly simple: split the list of sources to reindex and run each list - * in parallel. - *

    - * For example, you can use a bash script like this: - * - *

    -	 * for index in i1 i2 i3 i4 i5; do
    -	 *   curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
    -	 *     "source": {
    -	 *       "index": "'$index'"
    -	 *     },
    -	 *     "dest": {
    -	 *       "index": "'$index'-reindexed"
    -	 *     }
    -	 *   }'
    -	 * done
    -	 * 
    -	 * 
    - *

    - * Throttling - *

    - * Set requests_per_second to any positive decimal number - * (1.4, 6, 1000, for example) to - * throttle the rate at which reindex issues batches of index operations. - * Requests are throttled by padding each batch with a wait time. To turn off - * throttling, set requests_per_second to -1. - *

    - * The throttling is done by waiting between batches so that the scroll that - * reindex uses internally can be given a timeout that takes into account the - * padding. The padding time is the difference between the batch size divided by - * the requests_per_second and the time spent writing. By default - * the batch size is 1000, so if requests_per_second - * is set to 500: - * - *

    -	 * target_time = 1000 / 500 per second = 2 seconds
    -	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
    -	 * 
    -	 * 
    - *

    - * Since the batch is issued as a single bulk request, large batch sizes cause - * Elasticsearch to create many requests and then wait for a while before - * starting the next set. This is "bursty" instead of - * "smooth". - *

    - * Slicing - *

    - * Reindex supports sliced scroll to parallelize the reindexing process. This - * parallelization can improve efficiency and provide a convenient way to break - * the request down into smaller parts. - *

    - * NOTE: Reindexing from remote clusters does not support manual or automatic - * slicing. - *

    - * You can slice a reindex request manually by providing a slice ID and total - * number of slices to each request. You can also let reindex automatically - * parallelize by using sliced scroll to slice on _id. The - * slices parameter specifies the number of slices to use. - *

    - * Adding slices to the reindex request just automates the manual - * process, creating sub-requests which means it has some quirks: - *

      - *
    • You can see these requests in the tasks API. These sub-requests are - * "child" tasks of the task for the request with slices.
    • - *
    • Fetching the status of the task for the request with slices - * only contains the status of completed slices.
    • - *
    • These sub-requests are individually addressable for things like - * cancellation and rethrottling.
    • - *
    • Rethrottling the request with slices will rethrottle the - * unfinished sub-request proportionally.
    • - *
    • Canceling the request with slices will cancel each - * sub-request.
    • - *
    • Due to the nature of slices, each sub-request won't get a - * perfectly even portion of the documents. All documents will be addressed, but - * some slices may be larger than others. Expect larger slices to have a more - * even distribution.
    • - *
    • Parameters like requests_per_second and - * max_docs on a request with slices are distributed - * proportionally to each sub-request. Combine that with the previous point - * about distribution being uneven and you should conclude that using - * max_docs with slices might not result in exactly - * max_docs documents being reindexed.
    • - *
    • Each sub-request gets a slightly different snapshot of the source, though - * these are all taken at approximately the same time.
    • - *
    - *

    - * If slicing automatically, setting slices to auto - * will choose a reasonable number for most indices. If slicing manually or - * otherwise tuning automatic slicing, use the following guidelines. - *

    - * Query performance is most efficient when the number of slices is equal to the - * number of shards in the index. If that number is large (for example, - * 500), choose a lower number as too many slices will hurt - * performance. Setting slices higher than the number of shards generally does - * not improve efficiency and adds overhead. - *

    - * Indexing performance scales linearly across available resources with the - * number of slices. - *

    - * Whether query or indexing performance dominates the runtime depends on the - * documents being reindexed and cluster resources. - *

    - * Modify documents during reindexing - *

    - * Like _update_by_query, reindex operations support a script that - * modifies the document. Unlike _update_by_query, the script is - * allowed to modify the document's metadata. - *

    - * Just as in _update_by_query, you can set ctx.op to - * change the operation that is run on the destination. For example, set - * ctx.op to noop if your script decides that the - * document doesn’t have to be indexed in the destination. This "no - * operation" will be reported in the noop counter in the - * response body. Set ctx.op to delete if your script - * decides that the document must be deleted from the destination. The deletion - * will be reported in the deleted counter in the response body. - * Setting ctx.op to anything else will return an error, as will - * setting any other field in ctx. - *

    - * Think of the possibilities! Just be careful; you are able to change: - *

      - *
    • _id
    • - *
    • _index
    • - *
    • _version
    • - *
    • _routing
    • - *
    - *

    - * Setting _version to null or clearing it from the - * ctx map is just like not sending the version in an indexing - * request. It will cause the document to be overwritten in the destination - * regardless of the version on the target or the version type you use in the - * reindex API. - *

    - * Reindex from remote - *

    - * Reindex supports reindexing from a remote Elasticsearch cluster. The - * host parameter must contain a scheme, host, port, and optional - * path. The username and password parameters are - * optional and when they are present the reindex operation will connect to the - * remote Elasticsearch node using basic authentication. Be sure to use HTTPS - * when using basic authentication or the password will be sent in plain text. - * There are a range of settings available to configure the behavior of the - * HTTPS connection. - *

    - * When using Elastic Cloud, it is also possible to authenticate against the - * remote cluster through the use of a valid API key. Remote hosts must be - * explicitly allowed with the reindex.remote.whitelist setting. It - * can be set to a comma delimited list of allowed remote host and port - * combinations. Scheme is ignored; only the host and port are used. For - * example: - * - *

    -	 * reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"]
    -	 * 
    -	 * 
    - *

    - * The list of allowed hosts must be configured on any nodes that will - * coordinate the reindex. This feature should work with remote clusters of any - * version of Elasticsearch. This should enable you to upgrade from any version - * of Elasticsearch to the current version by reindexing from a cluster of the - * old version. - *

    - * WARNING: Elasticsearch does not support forward compatibility across major - * versions. For example, you cannot reindex from a 7.x cluster into a 6.x - * cluster. - *

    - * To enable queries sent to older versions of Elasticsearch, the - * query parameter is sent directly to the remote host without - * validation or modification. - *

    - * NOTE: Reindexing from remote clusters does not support manual or automatic - * slicing. - *

    - * Reindexing from a remote server uses an on-heap buffer that defaults to a - * maximum size of 100mb. If the remote index includes very large documents - * you'll need to use a smaller batch size. It is also possible to set the - * socket read timeout on the remote connection with the - * socket_timeout field and the connection timeout with the - * connect_timeout field. Both default to 30 seconds. - *

    - * Configuring SSL parameters - *

    - * Reindex from remote supports configurable SSL settings. These must be - * specified in the elasticsearch.yml file, with the exception of - * the secure settings, which you add in the Elasticsearch keystore. It is not - * possible to configure SSL in the body of the reindex request. + * Refer to the linked documentation for examples of how to reindex documents. * * @param fn * a function that initializes a builder to create the @@ -6319,7 +5925,8 @@ public final CompletableFuture termsEnum( * only useful as relative measures whereas the absolute numbers have no meaning * in this context. By default, when requesting term vectors of artificial * documents, a shard to get the statistics from is randomly selected. Use - * routing only to hit a particular shard. + * routing only to hit a particular shard. Refer to the linked + * documentation for detailed examples of how to use this API. * * @see Documentation @@ -6387,7 +5994,8 @@ public CompletableFuture termvectors(Termvector * only useful as relative measures whereas the absolute numbers have no meaning * in this context. By default, when requesting term vectors of artificial * documents, a shard to get the statistics from is randomly selected. Use - * routing only to hit a particular shard. + * routing only to hit a particular shard. Refer to the linked + * documentation for detailed examples of how to use this API. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java index 980a853d6..d55926d44 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java @@ -533,6 +533,10 @@ public ElasticsearchXpackClient xpack() { * five shards. 
The request will only wait for those three shards to refresh. * The other two shards that make up the index do not participate in the * _bulk request at all. + *

    + * You might want to disable the refresh interval temporarily to improve + * indexing throughput for large bulk requests. Refer to the linked + * documentation for step-by-step instructions using the index settings API. * * @see Documentation @@ -707,6 +711,10 @@ public BulkResponse bulk(BulkRequest request) throws IOException, ElasticsearchE * five shards. The request will only wait for those three shards to refresh. * The other two shards that make up the index do not participate in the * _bulk request at all. + *

    + * You might want to disable the refresh interval temporarily to improve + * indexing throughput for large bulk requests. Refer to the linked + * documentation for step-by-step instructions using the index settings API. * * @param fn * a function that initializes a builder to create the @@ -882,6 +890,10 @@ public final BulkResponse bulk(Function_bulk request at all. + *

    + * You might want to disable the refresh interval temporarily to improve + * indexing throughput for large bulk requests. Refer to the linked + * documentation for step-by-step instructions using the index settings API. * * @see Documentation @@ -4201,210 +4213,7 @@ public final RankEvalResponse rankEval(Functionmax_docs documents into the * target or it has gone through every document in the source query. *

    - * NOTE: The reindex API makes no effort to handle ID collisions. The last - * document written will "win" but the order isn't usually predictable - * so it is not a good idea to rely on this behavior. Instead, make sure that - * IDs are unique by using a script. - *

    - * Running reindex asynchronously - *

    - * If the request contains wait_for_completion=false, Elasticsearch - * performs some preflight checks, launches the request, and returns a task you - * can use to cancel or get the status of the task. Elasticsearch creates a - * record of this task as a document at _tasks/<task_id>. - *

    - * Reindex from multiple sources - *

    - * If you have many sources to reindex it is generally better to reindex them - * one at a time rather than using a glob pattern to pick up multiple sources. - * That way you can resume the process if there are any errors by removing the - * partially completed source and starting over. It also makes parallelizing the - * process fairly simple: split the list of sources to reindex and run each list - * in parallel. - *

    - * For example, you can use a bash script like this: - * - *

    -	 * for index in i1 i2 i3 i4 i5; do
    -	 *   curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
    -	 *     "source": {
    -	 *       "index": "'$index'"
    -	 *     },
    -	 *     "dest": {
    -	 *       "index": "'$index'-reindexed"
    -	 *     }
    -	 *   }'
    -	 * done
    -	 * 
    -	 * 
    - *

    - * Throttling - *

    - * Set requests_per_second to any positive decimal number - * (1.4, 6, 1000, for example) to - * throttle the rate at which reindex issues batches of index operations. - * Requests are throttled by padding each batch with a wait time. To turn off - * throttling, set requests_per_second to -1. - *

    - * The throttling is done by waiting between batches so that the scroll that - * reindex uses internally can be given a timeout that takes into account the - * padding. The padding time is the difference between the batch size divided by - * the requests_per_second and the time spent writing. By default - * the batch size is 1000, so if requests_per_second - * is set to 500: - * - *

    -	 * target_time = 1000 / 500 per second = 2 seconds
    -	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
    -	 * 
    -	 * 
    - *

    - * Since the batch is issued as a single bulk request, large batch sizes cause - * Elasticsearch to create many requests and then wait for a while before - * starting the next set. This is "bursty" instead of - * "smooth". - *

    - * Slicing - *

    - * Reindex supports sliced scroll to parallelize the reindexing process. This - * parallelization can improve efficiency and provide a convenient way to break - * the request down into smaller parts. - *

    - * NOTE: Reindexing from remote clusters does not support manual or automatic - * slicing. - *

    - * You can slice a reindex request manually by providing a slice ID and total - * number of slices to each request. You can also let reindex automatically - * parallelize by using sliced scroll to slice on _id. The - * slices parameter specifies the number of slices to use. - *

    - * Adding slices to the reindex request just automates the manual - * process, creating sub-requests which means it has some quirks: - *

      - *
    • You can see these requests in the tasks API. These sub-requests are - * "child" tasks of the task for the request with slices.
    • - *
    • Fetching the status of the task for the request with slices - * only contains the status of completed slices.
    • - *
    • These sub-requests are individually addressable for things like - * cancellation and rethrottling.
    • - *
    • Rethrottling the request with slices will rethrottle the - * unfinished sub-request proportionally.
    • - *
    • Canceling the request with slices will cancel each - * sub-request.
    • - *
    • Due to the nature of slices, each sub-request won't get a - * perfectly even portion of the documents. All documents will be addressed, but - * some slices may be larger than others. Expect larger slices to have a more - * even distribution.
    • - *
    • Parameters like requests_per_second and - * max_docs on a request with slices are distributed - * proportionally to each sub-request. Combine that with the previous point - * about distribution being uneven and you should conclude that using - * max_docs with slices might not result in exactly - * max_docs documents being reindexed.
    • - *
    • Each sub-request gets a slightly different snapshot of the source, though - * these are all taken at approximately the same time.
    • - *
    - *

    - * If slicing automatically, setting slices to auto - * will choose a reasonable number for most indices. If slicing manually or - * otherwise tuning automatic slicing, use the following guidelines. - *

    - * Query performance is most efficient when the number of slices is equal to the - * number of shards in the index. If that number is large (for example, - * 500), choose a lower number as too many slices will hurt - * performance. Setting slices higher than the number of shards generally does - * not improve efficiency and adds overhead. - *

    - * Indexing performance scales linearly across available resources with the - * number of slices. - *

    - * Whether query or indexing performance dominates the runtime depends on the - * documents being reindexed and cluster resources. - *

    - * Modify documents during reindexing - *

    - * Like _update_by_query, reindex operations support a script that - * modifies the document. Unlike _update_by_query, the script is - * allowed to modify the document's metadata. - *

    - * Just as in _update_by_query, you can set ctx.op to - * change the operation that is run on the destination. For example, set - * ctx.op to noop if your script decides that the - * document doesn’t have to be indexed in the destination. This "no - * operation" will be reported in the noop counter in the - * response body. Set ctx.op to delete if your script - * decides that the document must be deleted from the destination. The deletion - * will be reported in the deleted counter in the response body. - * Setting ctx.op to anything else will return an error, as will - * setting any other field in ctx. - *

    - * Think of the possibilities! Just be careful; you are able to change: - *

      - *
    • _id
    • - *
    • _index
    • - *
    • _version
    • - *
    • _routing
    • - *
    - *

    - * Setting _version to null or clearing it from the - * ctx map is just like not sending the version in an indexing - * request. It will cause the document to be overwritten in the destination - * regardless of the version on the target or the version type you use in the - * reindex API. - *

    - * Reindex from remote - *

    - * Reindex supports reindexing from a remote Elasticsearch cluster. The - * host parameter must contain a scheme, host, port, and optional - * path. The username and password parameters are - * optional and when they are present the reindex operation will connect to the - * remote Elasticsearch node using basic authentication. Be sure to use HTTPS - * when using basic authentication or the password will be sent in plain text. - * There are a range of settings available to configure the behavior of the - * HTTPS connection. - *

    - * When using Elastic Cloud, it is also possible to authenticate against the - * remote cluster through the use of a valid API key. Remote hosts must be - * explicitly allowed with the reindex.remote.whitelist setting. It - * can be set to a comma delimited list of allowed remote host and port - * combinations. Scheme is ignored; only the host and port are used. For - * example: - * - *

    -	 * reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"]
    -	 * 
    -	 * 
    - *

    - * The list of allowed hosts must be configured on any nodes that will - * coordinate the reindex. This feature should work with remote clusters of any - * version of Elasticsearch. This should enable you to upgrade from any version - * of Elasticsearch to the current version by reindexing from a cluster of the - * old version. - *

    - * WARNING: Elasticsearch does not support forward compatibility across major - * versions. For example, you cannot reindex from a 7.x cluster into a 6.x - * cluster. - *

    - * To enable queries sent to older versions of Elasticsearch, the - * query parameter is sent directly to the remote host without - * validation or modification. - *

    - * NOTE: Reindexing from remote clusters does not support manual or automatic - * slicing. - *

    - * Reindexing from a remote server uses an on-heap buffer that defaults to a - * maximum size of 100mb. If the remote index includes very large documents - * you'll need to use a smaller batch size. It is also possible to set the - * socket read timeout on the remote connection with the - * socket_timeout field and the connection timeout with the - * connect_timeout field. Both default to 30 seconds. - *

    - * Configuring SSL parameters - *

    - * Reindex from remote supports configurable SSL settings. These must be - * specified in the elasticsearch.yml file, with the exception of - * the secure settings, which you add in the Elasticsearch keystore. It is not - * possible to configure SSL in the body of the reindex request. + * Refer to the linked documentation for examples of how to reindex documents. * * @see Documentation @@ -4483,210 +4292,7 @@ public ReindexResponse reindex(ReindexRequest request) throws IOException, Elast * until it has successfully indexed max_docs documents into the * target or it has gone through every document in the source query. *

    - * NOTE: The reindex API makes no effort to handle ID collisions. The last - * document written will "win" but the order isn't usually predictable - * so it is not a good idea to rely on this behavior. Instead, make sure that - * IDs are unique by using a script. - *

    - * Running reindex asynchronously - *

    - * If the request contains wait_for_completion=false, Elasticsearch - * performs some preflight checks, launches the request, and returns a task you - * can use to cancel or get the status of the task. Elasticsearch creates a - * record of this task as a document at _tasks/<task_id>. - *

    - * Reindex from multiple sources - *

    - * If you have many sources to reindex it is generally better to reindex them - * one at a time rather than using a glob pattern to pick up multiple sources. - * That way you can resume the process if there are any errors by removing the - * partially completed source and starting over. It also makes parallelizing the - * process fairly simple: split the list of sources to reindex and run each list - * in parallel. - *

    - * For example, you can use a bash script like this: - * - *

    -	 * for index in i1 i2 i3 i4 i5; do
    -	 *   curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
    -	 *     "source": {
    -	 *       "index": "'$index'"
    -	 *     },
    -	 *     "dest": {
    -	 *       "index": "'$index'-reindexed"
    -	 *     }
    -	 *   }'
    -	 * done
    -	 * 
    -	 * 
    - *

    - * Throttling - *

    - * Set requests_per_second to any positive decimal number - * (1.4, 6, 1000, for example) to - * throttle the rate at which reindex issues batches of index operations. - * Requests are throttled by padding each batch with a wait time. To turn off - * throttling, set requests_per_second to -1. - *

    - * The throttling is done by waiting between batches so that the scroll that - * reindex uses internally can be given a timeout that takes into account the - * padding. The padding time is the difference between the batch size divided by - * the requests_per_second and the time spent writing. By default - * the batch size is 1000, so if requests_per_second - * is set to 500: - * - *

    -	 * target_time = 1000 / 500 per second = 2 seconds
    -	 * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
    -	 * 
    -	 * 
    - *

    - * Since the batch is issued as a single bulk request, large batch sizes cause - * Elasticsearch to create many requests and then wait for a while before - * starting the next set. This is "bursty" instead of - * "smooth". - *

    - * Slicing - *

    - * Reindex supports sliced scroll to parallelize the reindexing process. This - * parallelization can improve efficiency and provide a convenient way to break - * the request down into smaller parts. - *

    - * NOTE: Reindexing from remote clusters does not support manual or automatic - * slicing. - *

    - * You can slice a reindex request manually by providing a slice ID and total - * number of slices to each request. You can also let reindex automatically - * parallelize by using sliced scroll to slice on _id. The - * slices parameter specifies the number of slices to use. - *

    - * Adding slices to the reindex request just automates the manual - * process, creating sub-requests which means it has some quirks: - *

      - *
    • You can see these requests in the tasks API. These sub-requests are - * "child" tasks of the task for the request with slices.
    • - *
    • Fetching the status of the task for the request with slices - * only contains the status of completed slices.
    • - *
    • These sub-requests are individually addressable for things like - * cancellation and rethrottling.
    • - *
    • Rethrottling the request with slices will rethrottle the - * unfinished sub-request proportionally.
    • - *
    • Canceling the request with slices will cancel each - * sub-request.
    • - *
    • Due to the nature of slices, each sub-request won't get a - * perfectly even portion of the documents. All documents will be addressed, but - * some slices may be larger than others. Expect larger slices to have a more - * even distribution.
    • - *
    • Parameters like requests_per_second and - * max_docs on a request with slices are distributed - * proportionally to each sub-request. Combine that with the previous point - * about distribution being uneven and you should conclude that using - * max_docs with slices might not result in exactly - * max_docs documents being reindexed.
    • - *
    • Each sub-request gets a slightly different snapshot of the source, though - * these are all taken at approximately the same time.
    • - *
    - *

    - * If slicing automatically, setting slices to auto - * will choose a reasonable number for most indices. If slicing manually or - * otherwise tuning automatic slicing, use the following guidelines. - *

    - * Query performance is most efficient when the number of slices is equal to the - * number of shards in the index. If that number is large (for example, - * 500), choose a lower number as too many slices will hurt - * performance. Setting slices higher than the number of shards generally does - * not improve efficiency and adds overhead. - *

    - * Indexing performance scales linearly across available resources with the - * number of slices. - *

    - * Whether query or indexing performance dominates the runtime depends on the - * documents being reindexed and cluster resources. - *

    - * Modify documents during reindexing - *

    - * Like _update_by_query, reindex operations support a script that - * modifies the document. Unlike _update_by_query, the script is - * allowed to modify the document's metadata. - *

    - * Just as in _update_by_query, you can set ctx.op to - * change the operation that is run on the destination. For example, set - * ctx.op to noop if your script decides that the - * document doesn’t have to be indexed in the destination. This "no - * operation" will be reported in the noop counter in the - * response body. Set ctx.op to delete if your script - * decides that the document must be deleted from the destination. The deletion - * will be reported in the deleted counter in the response body. - * Setting ctx.op to anything else will return an error, as will - * setting any other field in ctx. - *

    - * Think of the possibilities! Just be careful; you are able to change: - *

      - *
    • _id
    • - *
    • _index
    • - *
    • _version
    • - *
    • _routing
    • - *
    - *

    - * Setting _version to null or clearing it from the - * ctx map is just like not sending the version in an indexing - * request. It will cause the document to be overwritten in the destination - * regardless of the version on the target or the version type you use in the - * reindex API. - *

    - * Reindex from remote - *

    - * Reindex supports reindexing from a remote Elasticsearch cluster. The - * host parameter must contain a scheme, host, port, and optional - * path. The username and password parameters are - * optional and when they are present the reindex operation will connect to the - * remote Elasticsearch node using basic authentication. Be sure to use HTTPS - * when using basic authentication or the password will be sent in plain text. - * There are a range of settings available to configure the behavior of the - * HTTPS connection. - *

    - * When using Elastic Cloud, it is also possible to authenticate against the - * remote cluster through the use of a valid API key. Remote hosts must be - * explicitly allowed with the reindex.remote.whitelist setting. It - * can be set to a comma delimited list of allowed remote host and port - * combinations. Scheme is ignored; only the host and port are used. For - * example: - * - *

    -	 * reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"]
    -	 * 
    -	 * 
    - *

    - * The list of allowed hosts must be configured on any nodes that will - * coordinate the reindex. This feature should work with remote clusters of any - * version of Elasticsearch. This should enable you to upgrade from any version - * of Elasticsearch to the current version by reindexing from a cluster of the - * old version. - *

    - * WARNING: Elasticsearch does not support forward compatibility across major - * versions. For example, you cannot reindex from a 7.x cluster into a 6.x - * cluster. - *

    - * To enable queries sent to older versions of Elasticsearch, the - * query parameter is sent directly to the remote host without - * validation or modification. - *

    - * NOTE: Reindexing from remote clusters does not support manual or automatic - * slicing. - *

    - * Reindexing from a remote server uses an on-heap buffer that defaults to a - * maximum size of 100mb. If the remote index includes very large documents - * you'll need to use a smaller batch size. It is also possible to set the - * socket read timeout on the remote connection with the - * socket_timeout field and the connection timeout with the - * connect_timeout field. Both default to 30 seconds. - *

    - * Configuring SSL parameters - *

    - * Reindex from remote supports configurable SSL settings. These must be - * specified in the elasticsearch.yml file, with the exception of - * the secure settings, which you add in the Elasticsearch keystore. It is not - * possible to configure SSL in the body of the reindex request. + * Refer to the linked documentation for examples of how to reindex documents. * * @param fn * a function that initializes a builder to create the @@ -6364,7 +5970,8 @@ public final TermsEnumResponse termsEnum(Functionrouting only to hit a particular shard. + * routing only to hit a particular shard. Refer to the linked + * documentation for detailed examples of how to use this API. * * @see Documentation @@ -6433,7 +6040,8 @@ public TermvectorsResponse termvectors(TermvectorsRequest * only useful as relative measures whereas the absolute numbers have no meaning * in this context. By default, when requesting term vectors of artificial * documents, a shard to get the statistics from is randomly selected. Use - * routing only to hit a particular shard. + * routing only to hit a particular shard. Refer to the linked + * documentation for detailed examples of how to use this API. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/InnerRetriever.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/InnerRetriever.java new file mode 100644 index 000000000..494c55d96 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/InnerRetriever.java @@ -0,0 +1,216 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch._types; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Float; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: _types.InnerRetriever + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class InnerRetriever implements JsonpSerializable { + private final Retriever retriever; + + private final float weight; + + private final ScoreNormalizer normalizer; + + // --------------------------------------------------------------------------------------------- + + private InnerRetriever(Builder builder) { + + this.retriever = ApiTypeHelper.requireNonNull(builder.retriever, this, "retriever"); + this.weight = ApiTypeHelper.requireNonNull(builder.weight, this, "weight", 0); + this.normalizer = ApiTypeHelper.requireNonNull(builder.normalizer, this, "normalizer"); + + } + + public static InnerRetriever of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: {@code retriever} + */ + public final Retriever retriever() { + return this.retriever; + } + + /** + * Required - API name: {@code weight} + */ + public final float weight() { + return this.weight; + } + + /** + * Required - API name: {@code normalizer} + */ + public final ScoreNormalizer normalizer() { + return this.normalizer; + } + + /** + * Serialize this object to JSON. 
+ */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("retriever"); + this.retriever.serialize(generator, mapper); + + generator.writeKey("weight"); + generator.write(this.weight); + + generator.writeKey("normalizer"); + this.normalizer.serialize(generator, mapper); + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link InnerRetriever}. + */ + + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + private Retriever retriever; + + private Float weight; + + private ScoreNormalizer normalizer; + + /** + * Required - API name: {@code retriever} + */ + public final Builder retriever(Retriever value) { + this.retriever = value; + return this; + } + + /** + * Required - API name: {@code retriever} + */ + public final Builder retriever(Function> fn) { + return this.retriever(fn.apply(new Retriever.Builder()).build()); + } + + /** + * Required - API name: {@code retriever} + */ + public final Builder retriever(RetrieverVariant value) { + this.retriever = value._toRetriever(); + return this; + } + + /** + * Required - API name: {@code weight} + */ + public final Builder weight(float value) { + this.weight = value; + return this; + } + + /** + * Required - API name: {@code normalizer} + */ + public final Builder normalizer(ScoreNormalizer value) { + this.normalizer = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link InnerRetriever}. + * + * @throws NullPointerException + * if some of the required fields are null. 
+ */ + public InnerRetriever build() { + _checkSingleUse(); + + return new InnerRetriever(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link InnerRetriever} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + InnerRetriever::setupInnerRetrieverDeserializer); + + protected static void setupInnerRetrieverDeserializer(ObjectDeserializer op) { + + op.add(Builder::retriever, Retriever._DESERIALIZER, "retriever"); + op.add(Builder::weight, JsonpDeserializer.floatDeserializer(), "weight"); + op.add(Builder::normalizer, ScoreNormalizer._DESERIALIZER, "normalizer"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/LinearRetriever.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/LinearRetriever.java new file mode 100644 index 000000000..7a19c531f --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/LinearRetriever.java @@ -0,0 +1,210 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch._types; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.Integer; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: _types.LinearRetriever + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class LinearRetriever extends RetrieverBase implements RetrieverVariant { + private final List retrievers; + + private final int rankWindowSize; + + // --------------------------------------------------------------------------------------------- + + private LinearRetriever(Builder builder) { + super(builder); + + this.retrievers = ApiTypeHelper.unmodifiable(builder.retrievers); + this.rankWindowSize = ApiTypeHelper.requireNonNull(builder.rankWindowSize, this, "rankWindowSize", 0); + + } + + public static LinearRetriever of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Retriever variant kind. 
+ */ + @Override + public Retriever.Kind _retrieverKind() { + return Retriever.Kind.Linear; + } + + /** + * Inner retrievers. + *

    + * API name: {@code retrievers} + */ + public final List retrievers() { + return this.retrievers; + } + + /** + * Required - API name: {@code rank_window_size} + */ + public final int rankWindowSize() { + return this.rankWindowSize; + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + super.serializeInternal(generator, mapper); + if (ApiTypeHelper.isDefined(this.retrievers)) { + generator.writeKey("retrievers"); + generator.writeStartArray(); + for (InnerRetriever item0 : this.retrievers) { + item0.serialize(generator, mapper); + + } + generator.writeEnd(); + + } + generator.writeKey("rank_window_size"); + generator.write(this.rankWindowSize); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link LinearRetriever}. + */ + + public static class Builder extends RetrieverBase.AbstractBuilder + implements + ObjectBuilder { + @Nullable + private List retrievers; + + private Integer rankWindowSize; + + /** + * Inner retrievers. + *

    + * API name: {@code retrievers} + *

    + * Adds all elements of list to retrievers. + */ + public final Builder retrievers(List list) { + this.retrievers = _listAddAll(this.retrievers, list); + return this; + } + + /** + * Inner retrievers. + *

    + * API name: {@code retrievers} + *

    + * Adds one or more values to retrievers. + */ + public final Builder retrievers(InnerRetriever value, InnerRetriever... values) { + this.retrievers = _listAdd(this.retrievers, value, values); + return this; + } + + /** + * Inner retrievers. + *

    + * API name: {@code retrievers} + *

    + * Adds a value to retrievers using a builder lambda. + */ + public final Builder retrievers(Function> fn) { + return retrievers(fn.apply(new InnerRetriever.Builder()).build()); + } + + /** + * Required - API name: {@code rank_window_size} + */ + public final Builder rankWindowSize(int value) { + this.rankWindowSize = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link LinearRetriever}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public LinearRetriever build() { + _checkSingleUse(); + + return new LinearRetriever(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link LinearRetriever} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + LinearRetriever::setupLinearRetrieverDeserializer); + + protected static void setupLinearRetrieverDeserializer(ObjectDeserializer op) { + RetrieverBase.setupRetrieverBaseDeserializer(op); + op.add(Builder::retrievers, JsonpDeserializer.arrayDeserializer(InnerRetriever._DESERIALIZER), "retrievers"); + op.add(Builder::rankWindowSize, JsonpDeserializer.integerDeserializer(), "rank_window_size"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/PinnedRetriever.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/PinnedRetriever.java new file mode 100644 index 000000000..b696e3eee --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/PinnedRetriever.java @@ -0,0 +1,294 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch._types; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.Integer; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: _types.PinnedRetriever + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class PinnedRetriever extends RetrieverBase implements RetrieverVariant { + private final Retriever retriever; + + private final List ids; + + private final List docs; + + private final int rankWindowSize; + + // --------------------------------------------------------------------------------------------- + + private PinnedRetriever(Builder builder) { + super(builder); + + this.retriever = ApiTypeHelper.requireNonNull(builder.retriever, this, "retriever"); + this.ids = ApiTypeHelper.unmodifiable(builder.ids); + this.docs = ApiTypeHelper.unmodifiable(builder.docs); + this.rankWindowSize = ApiTypeHelper.requireNonNull(builder.rankWindowSize, this, "rankWindowSize", 0); + + } + + public static PinnedRetriever of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Retriever variant kind. + */ + @Override + public Retriever.Kind _retrieverKind() { + return Retriever.Kind.Pinned; + } + + /** + * Required - Inner retriever. + *

    + * API name: {@code retriever} + */ + public final Retriever retriever() { + return this.retriever; + } + + /** + * API name: {@code ids} + */ + public final List ids() { + return this.ids; + } + + /** + * API name: {@code docs} + */ + public final List docs() { + return this.docs; + } + + /** + * Required - API name: {@code rank_window_size} + */ + public final int rankWindowSize() { + return this.rankWindowSize; + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + super.serializeInternal(generator, mapper); + generator.writeKey("retriever"); + this.retriever.serialize(generator, mapper); + + if (ApiTypeHelper.isDefined(this.ids)) { + generator.writeKey("ids"); + generator.writeStartArray(); + for (String item0 : this.ids) { + generator.write(item0); + + } + generator.writeEnd(); + + } + if (ApiTypeHelper.isDefined(this.docs)) { + generator.writeKey("docs"); + generator.writeStartArray(); + for (SpecifiedDocument item0 : this.docs) { + item0.serialize(generator, mapper); + + } + generator.writeEnd(); + + } + generator.writeKey("rank_window_size"); + generator.write(this.rankWindowSize); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link PinnedRetriever}. + */ + + public static class Builder extends RetrieverBase.AbstractBuilder + implements + ObjectBuilder { + private Retriever retriever; + + @Nullable + private List ids; + + @Nullable + private List docs; + + private Integer rankWindowSize; + + /** + * Required - Inner retriever. + *

    + * API name: {@code retriever} + */ + public final Builder retriever(Retriever value) { + this.retriever = value; + return this; + } + + /** + * Required - Inner retriever. + *

    + * API name: {@code retriever} + */ + public final Builder retriever(Function> fn) { + return this.retriever(fn.apply(new Retriever.Builder()).build()); + } + + /** + * Required - Inner retriever. + *

    + * API name: {@code retriever} + */ + public final Builder retriever(RetrieverVariant value) { + this.retriever = value._toRetriever(); + return this; + } + + /** + * API name: {@code ids} + *

    + * Adds all elements of list to ids. + */ + public final Builder ids(List list) { + this.ids = _listAddAll(this.ids, list); + return this; + } + + /** + * API name: {@code ids} + *

    + * Adds one or more values to ids. + */ + public final Builder ids(String value, String... values) { + this.ids = _listAdd(this.ids, value, values); + return this; + } + + /** + * API name: {@code docs} + *

    + * Adds all elements of list to docs. + */ + public final Builder docs(List list) { + this.docs = _listAddAll(this.docs, list); + return this; + } + + /** + * API name: {@code docs} + *

    + * Adds one or more values to docs. + */ + public final Builder docs(SpecifiedDocument value, SpecifiedDocument... values) { + this.docs = _listAdd(this.docs, value, values); + return this; + } + + /** + * API name: {@code docs} + *

    + * Adds a value to docs using a builder lambda. + */ + public final Builder docs(Function> fn) { + return docs(fn.apply(new SpecifiedDocument.Builder()).build()); + } + + /** + * Required - API name: {@code rank_window_size} + */ + public final Builder rankWindowSize(int value) { + this.rankWindowSize = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link PinnedRetriever}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public PinnedRetriever build() { + _checkSingleUse(); + + return new PinnedRetriever(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link PinnedRetriever} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + PinnedRetriever::setupPinnedRetrieverDeserializer); + + protected static void setupPinnedRetrieverDeserializer(ObjectDeserializer op) { + RetrieverBase.setupRetrieverBaseDeserializer(op); + op.add(Builder::retriever, Retriever._DESERIALIZER, "retriever"); + op.add(Builder::ids, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "ids"); + op.add(Builder::docs, JsonpDeserializer.arrayDeserializer(SpecifiedDocument._DESERIALIZER), "docs"); + op.add(Builder::rankWindowSize, JsonpDeserializer.integerDeserializer(), "rank_window_size"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RescorerRetriever.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RescorerRetriever.java new file mode 100644 index 000000000..92fca5673 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RescorerRetriever.java @@ -0,0 +1,239 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. 
/*
 * Licensed to Elasticsearch B.V. under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch B.V. licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package co.elastic.clients.elasticsearch._types;

import co.elastic.clients.elasticsearch.core.search.Rescore;
import co.elastic.clients.elasticsearch.core.search.RescoreVariant;
import co.elastic.clients.json.JsonpDeserializable;
import co.elastic.clients.json.JsonpDeserializer;
import co.elastic.clients.json.JsonpMapper;
import co.elastic.clients.json.ObjectBuilderDeserializer;
import co.elastic.clients.json.ObjectDeserializer;
import co.elastic.clients.util.ApiTypeHelper;
import co.elastic.clients.util.ObjectBuilder;
import jakarta.json.stream.JsonGenerator;
import java.util.List;
import java.util.Objects;
import java.util.function.Function;
import javax.annotation.Nullable;

//----------------------------------------------------------------
// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST.
//----------------------------------------------------------------
//
// This code is generated from the Elasticsearch API specification
// at https://github.com/elastic/elasticsearch-specification
//
// Manual updates to this file will be lost when the code is
// re-generated.
//
// If you find a property that is missing or wrongly typed, please
// open an issue or a PR on the API specification repository.
//
//----------------------------------------------------------------

// typedef: _types.RescorerRetriever

/**
 * Retriever variant that re-scores the top results of its inner
 * {@code retriever} with one or more {@code rescore} clauses.
 * <p>
 * NOTE(review): reconstructed from a pasted diff whose generic type parameters
 * had been stripped by text extraction; the restored generics should be
 * verified against the generated source.
 *
 * @see <a href=
 *      "https://github.com/elastic/elasticsearch-specification">API
 *      specification</a>
 */
@JsonpDeserializable
public class RescorerRetriever extends RetrieverBase implements RetrieverVariant {
	private final Retriever retriever;

	private final List<Rescore> rescore;

	// ---------------------------------------------------------------------------------------------

	private RescorerRetriever(Builder builder) {
		super(builder);

		this.retriever = ApiTypeHelper.requireNonNull(builder.retriever, this, "retriever");
		this.rescore = ApiTypeHelper.unmodifiableRequired(builder.rescore, this, "rescore");

	}

	public static RescorerRetriever of(Function<Builder, ObjectBuilder<RescorerRetriever>> fn) {
		return fn.apply(new Builder()).build();
	}

	/**
	 * Retriever variant kind.
	 */
	@Override
	public Retriever.Kind _retrieverKind() {
		return Retriever.Kind.Rescorer;
	}

	/**
	 * Required - Inner retriever.
	 * <p>
	 * API name: {@code retriever}
	 */
	public final Retriever retriever() {
		return this.retriever;
	}

	/**
	 * Required - API name: {@code rescore}
	 */
	public final List<Rescore> rescore() {
		return this.rescore;
	}

	protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) {

		super.serializeInternal(generator, mapper);
		generator.writeKey("retriever");
		this.retriever.serialize(generator, mapper);

		if (ApiTypeHelper.isDefined(this.rescore)) {
			generator.writeKey("rescore");
			generator.writeStartArray();
			for (Rescore item0 : this.rescore) {
				item0.serialize(generator, mapper);

			}
			generator.writeEnd();

		}

	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Builder for {@link RescorerRetriever}.
	 */

	public static class Builder extends RetrieverBase.AbstractBuilder<Builder>
			implements
				ObjectBuilder<RescorerRetriever> {
		private Retriever retriever;

		private List<Rescore> rescore;

		/**
		 * Required - Inner retriever.
		 * <p>
		 * API name: {@code retriever}
		 */
		public final Builder retriever(Retriever value) {
			this.retriever = value;
			return this;
		}

		/**
		 * Required - Inner retriever.
		 * <p>
		 * API name: {@code retriever}
		 */
		public final Builder retriever(Function<Retriever.Builder, ObjectBuilder<Retriever>> fn) {
			return this.retriever(fn.apply(new Retriever.Builder()).build());
		}

		/**
		 * Required - Inner retriever.
		 * <p>
		 * API name: {@code retriever}
		 */
		public final Builder retriever(RetrieverVariant value) {
			this.retriever = value._toRetriever();
			return this;
		}

		/**
		 * Required - API name: {@code rescore}
		 * <p>
		 * Adds all elements of <code>list</code> to <code>rescore</code>.
		 */
		public final Builder rescore(List<Rescore> list) {
			this.rescore = _listAddAll(this.rescore, list);
			return this;
		}

		/**
		 * Required - API name: {@code rescore}
		 * <p>
		 * Adds one or more values to <code>rescore</code>.
		 */
		public final Builder rescore(Rescore value, Rescore... values) {
			this.rescore = _listAdd(this.rescore, value, values);
			return this;
		}

		/**
		 * Required - API name: {@code rescore}
		 * <p>
		 * Adds one or more values to <code>rescore</code>.
		 */
		public final Builder rescore(RescoreVariant value, RescoreVariant... values) {
			this.rescore = _listAdd(this.rescore, value._toRescore());
			for (RescoreVariant v : values) {
				_listAdd(this.rescore, v._toRescore());
			}
			return this;
		}

		/**
		 * Required - API name: {@code rescore}
		 * <p>
		 * Adds a value to <code>rescore</code> using a builder lambda.
		 */
		public final Builder rescore(Function<Rescore.Builder, ObjectBuilder<Rescore>> fn) {
			return rescore(fn.apply(new Rescore.Builder()).build());
		}

		@Override
		protected Builder self() {
			return this;
		}

		/**
		 * Builds a {@link RescorerRetriever}.
		 *
		 * @throws NullPointerException
		 *             if some of the required fields are null.
		 */
		public RescorerRetriever build() {
			_checkSingleUse();

			return new RescorerRetriever(this);
		}
	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Json deserializer for {@link RescorerRetriever}
	 */
	public static final JsonpDeserializer<RescorerRetriever> _DESERIALIZER = ObjectBuilderDeserializer
			.lazy(Builder::new, RescorerRetriever::setupRescorerRetrieverDeserializer);

	protected static void setupRescorerRetrieverDeserializer(ObjectDeserializer<RescorerRetriever.Builder> op) {
		RetrieverBase.setupRetrieverBaseDeserializer(op);
		op.add(Builder::retriever, Retriever._DESERIALIZER, "retriever");
		op.add(Builder::rescore, JsonpDeserializer.arrayDeserializer(Rescore._DESERIALIZER), "rescore");

	}

}
+ * + * @throws IllegalStateException + * if the current variant is not of the {@code rescorer} kind. + */ + public RescorerRetriever rescorer() { + return TaggedUnionUtils.get(this, Kind.Rescorer); + } + + /** + * Is this variant instance of kind {@code linear}? + */ + public boolean isLinear() { + return _kind == Kind.Linear; + } + + /** + * Get the {@code linear} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code linear} kind. + */ + public LinearRetriever linear() { + return TaggedUnionUtils.get(this, Kind.Linear); + } + + /** + * Is this variant instance of kind {@code pinned}? + */ + public boolean isPinned() { + return _kind == Kind.Pinned; + } + + /** + * Get the {@code pinned} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code pinned} kind. + */ + public PinnedRetriever pinned() { + return TaggedUnionUtils.get(this, Kind.Pinned); + } + @Override @SuppressWarnings("unchecked") public void serialize(JsonGenerator generator, JsonpMapper mapper) { @@ -292,6 +349,37 @@ public ObjectBuilder rule(Function rescorer(RescorerRetriever v) { + this._kind = Kind.Rescorer; + this._value = v; + return this; + } + + public ObjectBuilder rescorer( + Function> fn) { + return this.rescorer(fn.apply(new RescorerRetriever.Builder()).build()); + } + + public ObjectBuilder linear(LinearRetriever v) { + this._kind = Kind.Linear; + this._value = v; + return this; + } + + public ObjectBuilder linear(Function> fn) { + return this.linear(fn.apply(new LinearRetriever.Builder()).build()); + } + + public ObjectBuilder pinned(PinnedRetriever v) { + this._kind = Kind.Pinned; + this._value = v; + return this; + } + + public ObjectBuilder pinned(Function> fn) { + return this.pinned(fn.apply(new PinnedRetriever.Builder()).build()); + } + public Retriever build() { _checkSingleUse(); return new Retriever(this); @@ -306,6 +394,9 @@ protected static void 
setupRetrieverDeserializer(ObjectDeserializer op) op.add(Builder::rrf, RRFRetriever._DESERIALIZER, "rrf"); op.add(Builder::textSimilarityReranker, TextSimilarityReranker._DESERIALIZER, "text_similarity_reranker"); op.add(Builder::rule, RuleRetriever._DESERIALIZER, "rule"); + op.add(Builder::rescorer, RescorerRetriever._DESERIALIZER, "rescorer"); + op.add(Builder::linear, LinearRetriever._DESERIALIZER, "linear"); + op.add(Builder::pinned, PinnedRetriever._DESERIALIZER, "pinned"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RetrieverBase.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RetrieverBase.java index d73b206a7..50bc64281 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RetrieverBase.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RetrieverBase.java @@ -33,6 +33,7 @@ import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; import java.lang.Float; +import java.lang.String; import java.util.List; import java.util.Objects; import java.util.function.Function; @@ -67,12 +68,16 @@ public abstract class RetrieverBase implements JsonpSerializable { @Nullable private final Float minScore; + @Nullable + private final String name; + // --------------------------------------------------------------------------------------------- protected RetrieverBase(AbstractBuilder builder) { this.filter = ApiTypeHelper.unmodifiable(builder.filter); this.minScore = builder.minScore; + this.name = builder.name; } @@ -96,6 +101,16 @@ public final Float minScore() { return this.minScore; } + /** + * Retriever name. + *

    + * API name: {@code _name} + */ + @Nullable + public final String name() { + return this.name; + } + /** * Serialize this object to JSON. */ @@ -122,6 +137,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write(this.minScore); } + if (this.name != null) { + generator.writeKey("_name"); + generator.write(this.name); + + } } @@ -139,6 +159,9 @@ public abstract static class AbstractBuilder @@ -200,6 +223,16 @@ public final BuilderT minScore(@Nullable Float value) { return self(); } + /** + * Retriever name. + *

    + * API name: {@code _name} + */ + public final BuilderT name(@Nullable String value) { + this.name = value; + return self(); + } + protected abstract BuilderT self(); } @@ -210,6 +243,7 @@ protected static > void setupRetrieve op.add(AbstractBuilder::filter, JsonpDeserializer.arrayDeserializer(Query._DESERIALIZER), "filter"); op.add(AbstractBuilder::minScore, JsonpDeserializer.floatDeserializer(), "min_score"); + op.add(AbstractBuilder::name, JsonpDeserializer.stringDeserializer(), "_name"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RetrieverBuilders.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RetrieverBuilders.java index b3d22f0ba..9766e80b7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RetrieverBuilders.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/RetrieverBuilders.java @@ -133,4 +133,58 @@ public static Retriever rule(Function> fn) { + Retriever.Builder builder = new Retriever.Builder(); + builder.rescorer(fn.apply(new RescorerRetriever.Builder()).build()); + return builder.build(); + } + + /** + * Creates a builder for the {@link LinearRetriever linear} {@code Retriever} + * variant. + */ + public static LinearRetriever.Builder linear() { + return new LinearRetriever.Builder(); + } + + /** + * Creates a Retriever of the {@link LinearRetriever linear} {@code Retriever} + * variant. + */ + public static Retriever linear(Function> fn) { + Retriever.Builder builder = new Retriever.Builder(); + builder.linear(fn.apply(new LinearRetriever.Builder()).build()); + return builder.build(); + } + + /** + * Creates a builder for the {@link PinnedRetriever pinned} {@code Retriever} + * variant. + */ + public static PinnedRetriever.Builder pinned() { + return new PinnedRetriever.Builder(); + } + + /** + * Creates a Retriever of the {@link PinnedRetriever pinned} {@code Retriever} + * variant. 
+ */ + public static Retriever pinned(Function> fn) { + Retriever.Builder builder = new Retriever.Builder(); + builder.pinned(fn.apply(new PinnedRetriever.Builder()).build()); + return builder.build(); + } + } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ScoreNormalizer.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ScoreNormalizer.java new file mode 100644 index 000000000..4a6c6cc59 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ScoreNormalizer.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch._types; + +import co.elastic.clients.json.JsonEnum; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. 
+// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public enum ScoreNormalizer implements JsonEnum { + None("none"), + + Minmax("minmax"), + + L2Norm("l2_norm"), + + ; + + private final String jsonValue; + + ScoreNormalizer(String jsonValue) { + this.jsonValue = jsonValue; + } + + public String jsonValue() { + return this.jsonValue; + } + + public static final JsonEnum.Deserializer _DESERIALIZER = new JsonEnum.Deserializer<>( + ScoreNormalizer.values()); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/SpecifiedDocument.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/SpecifiedDocument.java new file mode 100644 index 000000000..84f98d512 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/SpecifiedDocument.java @@ -0,0 +1,182 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch._types; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: _types.SpecifiedDocument + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class SpecifiedDocument implements JsonpSerializable { + @Nullable + private final String index; + + private final String id; + + // --------------------------------------------------------------------------------------------- + + private SpecifiedDocument(Builder builder) { + + this.index = builder.index; + this.id = ApiTypeHelper.requireNonNull(builder.id, this, "id"); + + } + + public static SpecifiedDocument of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * API name: {@code index} + */ + @Nullable + public final String index() { + return this.index; + } + + /** + * Required - API name: {@code id} + */ + public final String id() { + return this.id; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.index != null) { + generator.writeKey("index"); + generator.write(this.index); + + } + generator.writeKey("id"); + generator.write(this.id); + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link SpecifiedDocument}. 
+ */ + + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + @Nullable + private String index; + + private String id; + + /** + * API name: {@code index} + */ + public final Builder index(@Nullable String value) { + this.index = value; + return this; + } + + /** + * Required - API name: {@code id} + */ + public final Builder id(String value) { + this.id = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link SpecifiedDocument}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public SpecifiedDocument build() { + _checkSingleUse(); + + return new SpecifiedDocument(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link SpecifiedDocument} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, SpecifiedDocument::setupSpecifiedDocumentDeserializer); + + protected static void setupSpecifiedDocumentDeserializer(ObjectDeserializer op) { + + op.add(Builder::index, JsonpDeserializer.stringDeserializer(), "index"); + op.add(Builder::id, JsonpDeserializer.stringDeserializer(), "id"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/BoxplotAggregation.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/BoxplotAggregation.java index 06573f746..6a0c49e94 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/BoxplotAggregation.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/BoxplotAggregation.java @@ -59,12 +59,16 @@ public class BoxplotAggregation extends MetricAggregationBase implements Aggrega @Nullable private final Double compression; + @Nullable + private final TDigestExecutionHint executionHint; + // 
--------------------------------------------------------------------------------------------- private BoxplotAggregation(Builder builder) { super(builder); this.compression = builder.compression; + this.executionHint = builder.executionHint; } @@ -92,6 +96,20 @@ public final Double compression() { return this.compression; } + /** + * The default implementation of TDigest is optimized for performance, scaling + * to millions or even billions of sample values while maintaining acceptable + * accuracy levels (close to 1% relative error for millions of samples in some + * cases). To use an implementation optimized for accuracy, set this parameter + * to high_accuracy instead. + *

    + * API name: {@code execution_hint} + */ + @Nullable + public final TDigestExecutionHint executionHint() { + return this.executionHint; + } + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { super.serializeInternal(generator, mapper); @@ -100,6 +118,10 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write(this.compression); } + if (this.executionHint != null) { + generator.writeKey("execution_hint"); + this.executionHint.serialize(generator, mapper); + } } @@ -115,6 +137,9 @@ public static class Builder extends MetricAggregationBase.AbstractBuilder20 * compression, enabling control of memory usage and @@ -127,6 +152,20 @@ public final Builder compression(@Nullable Double value) { return this; } + /** + * The default implementation of TDigest is optimized for performance, scaling + * to millions or even billions of sample values while maintaining acceptable + * accuracy levels (close to 1% relative error for millions of samples in some + * cases). To use an implementation optimized for accuracy, set this parameter + * to high_accuracy instead. + *

    + * API name: {@code execution_hint} + */ + public final Builder executionHint(@Nullable TDigestExecutionHint value) { + this.executionHint = value; + return this; + } + @Override protected Builder self() { return this; @@ -156,6 +195,7 @@ public BoxplotAggregation build() { protected static void setupBoxplotAggregationDeserializer(ObjectDeserializer op) { MetricAggregationBase.setupMetricAggregationBaseDeserializer(op); op.add(Builder::compression, JsonpDeserializer.doubleDeserializer(), "compression"); + op.add(Builder::executionHint, TDigestExecutionHint._DESERIALIZER, "execution_hint"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/MedianAbsoluteDeviationAggregation.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/MedianAbsoluteDeviationAggregation.java index e2c674e8c..5703ba8fe 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/MedianAbsoluteDeviationAggregation.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/MedianAbsoluteDeviationAggregation.java @@ -59,12 +59,16 @@ public class MedianAbsoluteDeviationAggregation extends FormatMetricAggregationB @Nullable private final Double compression; + @Nullable + private final TDigestExecutionHint executionHint; + // --------------------------------------------------------------------------------------------- private MedianAbsoluteDeviationAggregation(Builder builder) { super(builder); this.compression = builder.compression; + this.executionHint = builder.executionHint; } @@ -93,6 +97,20 @@ public final Double compression() { return this.compression; } + /** + * The default implementation of TDigest is optimized for performance, scaling + * to millions or even billions of sample values while maintaining acceptable + * accuracy levels (close to 1% relative error for millions of samples in some + * cases). 
To use an implementation optimized for accuracy, set this parameter + * to high_accuracy instead. + *

    + * API name: {@code execution_hint} + */ + @Nullable + public final TDigestExecutionHint executionHint() { + return this.executionHint; + } + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { super.serializeInternal(generator, mapper); @@ -101,6 +119,10 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write(this.compression); } + if (this.executionHint != null) { + generator.writeKey("execution_hint"); + this.executionHint.serialize(generator, mapper); + } } @@ -116,6 +138,9 @@ public static class Builder extends FormatMetricAggregationBase.AbstractBuilder< @Nullable private Double compression; + @Nullable + private TDigestExecutionHint executionHint; + /** * Limits the maximum number of nodes used by the underlying TDigest algorithm * to 20 * compression, enabling control of memory usage and @@ -128,6 +153,20 @@ public final Builder compression(@Nullable Double value) { return this; } + /** + * The default implementation of TDigest is optimized for performance, scaling + * to millions or even billions of sample values while maintaining acceptable + * accuracy levels (close to 1% relative error for millions of samples in some + * cases). To use an implementation optimized for accuracy, set this parameter + * to high_accuracy instead. + *

    + * API name: {@code execution_hint} + */ + public final Builder executionHint(@Nullable TDigestExecutionHint value) { + this.executionHint = value; + return this; + } + @Override protected Builder self() { return this; @@ -159,6 +198,7 @@ protected static void setupMedianAbsoluteDeviationAggregationDeserializer( ObjectDeserializer op) { FormatMetricAggregationBase.setupFormatMetricAggregationBaseDeserializer(op); op.add(Builder::compression, JsonpDeserializer.doubleDeserializer(), "compression"); + op.add(Builder::executionHint, TDigestExecutionHint._DESERIALIZER, "execution_hint"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/TDigest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/TDigest.java index 5a0442d3e..a1a3da2c1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/TDigest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/TDigest.java @@ -61,11 +61,15 @@ public class TDigest implements JsonpSerializable { @Nullable private final Integer compression; + @Nullable + private final TDigestExecutionHint executionHint; + // --------------------------------------------------------------------------------------------- private TDigest(Builder builder) { this.compression = builder.compression; + this.executionHint = builder.executionHint; } @@ -85,6 +89,20 @@ public final Integer compression() { return this.compression; } + /** + * The default implementation of TDigest is optimized for performance, scaling + * to millions or even billions of sample values while maintaining acceptable + * accuracy levels (close to 1% relative error for millions of samples in some + * cases). To use an implementation optimized for accuracy, set this parameter + * to high_accuracy instead. + *

    + * API name: {@code execution_hint} + */ + @Nullable + public final TDigestExecutionHint executionHint() { + return this.executionHint; + } + /** * Serialize this object to JSON. */ @@ -101,6 +119,10 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write(this.compression); } + if (this.executionHint != null) { + generator.writeKey("execution_hint"); + this.executionHint.serialize(generator, mapper); + } } @@ -119,6 +141,9 @@ public static class Builder extends WithJsonObjectBuilderBase implement @Nullable private Integer compression; + @Nullable + private TDigestExecutionHint executionHint; + /** * Limits the maximum number of nodes used by the underlying TDigest algorithm * to 20 * compression, enabling control of memory usage and @@ -131,6 +156,20 @@ public final Builder compression(@Nullable Integer value) { return this; } + /** + * The default implementation of TDigest is optimized for performance, scaling + * to millions or even billions of sample values while maintaining acceptable + * accuracy levels (close to 1% relative error for millions of samples in some + * cases). To use an implementation optimized for accuracy, set this parameter + * to high_accuracy instead. + *

    + * API name: {@code execution_hint} + */ + public final Builder executionHint(@Nullable TDigestExecutionHint value) { + this.executionHint = value; + return this; + } + @Override protected Builder self() { return this; @@ -160,6 +199,7 @@ public TDigest build() { protected static void setupTDigestDeserializer(ObjectDeserializer op) { op.add(Builder::compression, JsonpDeserializer.integerDeserializer(), "compression"); + op.add(Builder::executionHint, TDigestExecutionHint._DESERIALIZER, "execution_hint"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/TDigestExecutionHint.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/TDigestExecutionHint.java new file mode 100644 index 000000000..29475b2e3 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/aggregations/TDigestExecutionHint.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch._types.aggregations; + +import co.elastic.clients.json.JsonEnum; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public enum TDigestExecutionHint implements JsonEnum { + Default("default"), + + HighAccuracy("high_accuracy"), + + ; + + private final String jsonValue; + + TDigestExecutionHint(String jsonValue) { + this.jsonValue = jsonValue; + } + + public String jsonValue() { + return this.jsonValue; + } + + public static final JsonEnum.Deserializer _DESERIALIZER = new JsonEnum.Deserializer<>( + TDigestExecutionHint.values()); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/GeoGridQuery.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/GeoGridQuery.java index f1551cd19..8325dab86 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/GeoGridQuery.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/GeoGridQuery.java @@ -77,7 +77,7 @@ public class GeoGridQuery extends QueryBase */ public enum Kind implements JsonEnum { - Geogrid("geogrid"), + Geotile("geotile"), Geohash("geohash"), @@ -142,20 +142,20 @@ public final String field() { } /** - * Is 
this variant instance of kind {@code geogrid}? + * Is this variant instance of kind {@code geotile}? */ - public boolean isGeogrid() { - return _kind == Kind.Geogrid; + public boolean isGeotile() { + return _kind == Kind.Geotile; } /** - * Get the {@code geogrid} variant value. + * Get the {@code geotile} variant value. * * @throws IllegalStateException - * if the current variant is not of the {@code geogrid} kind. + * if the current variant is not of the {@code geotile} kind. */ - public String geogrid() { - return TaggedUnionUtils.get(this, Kind.Geogrid); + public String geotile() { + return TaggedUnionUtils.get(this, Kind.Geotile); } /** @@ -206,7 +206,7 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { ((JsonpSerializable) _value).serialize(generator, mapper); } else { switch (_kind) { - case Geogrid : + case Geotile : generator.write(((String) this._value)); break; @@ -250,8 +250,8 @@ public final Builder field(String value) { protected Builder self() { return this; } - public ObjectBuilder geogrid(String v) { - this._kind = Kind.Geogrid; + public ObjectBuilder geotile(String v) { + this._kind = Kind.Geotile; this._value = v; return this; } @@ -277,7 +277,7 @@ public GeoGridQuery build() { protected static void setupGeoGridQueryDeserializer(ObjectDeserializer op) { QueryBase.setupQueryBaseDeserializer(op); - op.add(Builder::geogrid, JsonpDeserializer.stringDeserializer(), "geogrid"); + op.add(Builder::geotile, JsonpDeserializer.stringDeserializer(), "geotile"); op.add(Builder::geohash, JsonpDeserializer.stringDeserializer(), "geohash"); op.add(Builder::geohex, JsonpDeserializer.stringDeserializer(), "geohex"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/GeoGridQueryBuilders.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/GeoGridQueryBuilders.java index ac50148d6..2ebcce265 100644 --- 
a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/GeoGridQueryBuilders.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/GeoGridQueryBuilders.java @@ -40,7 +40,7 @@ /** * Builders for {@link GeoGridQuery} variants. *

    - * Variants geogrid, geohash, geohex are + * Variants geotile, geohash, geohex are * not available here as they don't have a dedicated class. Use * {@link GeoGridQuery}'s builder for these. * diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/Intervals.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/Intervals.java index a550021f3..5de16ec96 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/Intervals.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/Intervals.java @@ -83,6 +83,10 @@ public enum Kind implements JsonEnum { Prefix("prefix"), + Range("range"), + + Regexp("regexp"), + Wildcard("wildcard"), ; @@ -223,6 +227,40 @@ public IntervalsPrefix prefix() { return TaggedUnionUtils.get(this, Kind.Prefix); } + /** + * Is this variant instance of kind {@code range}? + */ + public boolean isRange() { + return _kind == Kind.Range; + } + + /** + * Get the {@code range} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code range} kind. + */ + public IntervalsRange range() { + return TaggedUnionUtils.get(this, Kind.Range); + } + + /** + * Is this variant instance of kind {@code regexp}? + */ + public boolean isRegexp() { + return _kind == Kind.Regexp; + } + + /** + * Get the {@code regexp} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code regexp} kind. + */ + public IntervalsRegexp regexp() { + return TaggedUnionUtils.get(this, Kind.Regexp); + } + /** * Is this variant instance of kind {@code wildcard}? 
*/ @@ -318,6 +356,26 @@ public ObjectBuilder prefix(Function range(IntervalsRange v) { + this._kind = Kind.Range; + this._value = v; + return this; + } + + public ObjectBuilder range(Function> fn) { + return this.range(fn.apply(new IntervalsRange.Builder()).build()); + } + + public ObjectBuilder regexp(IntervalsRegexp v) { + this._kind = Kind.Regexp; + this._value = v; + return this; + } + + public ObjectBuilder regexp(Function> fn) { + return this.regexp(fn.apply(new IntervalsRegexp.Builder()).build()); + } + public ObjectBuilder wildcard(IntervalsWildcard v) { this._kind = Kind.Wildcard; this._value = v; @@ -343,6 +401,8 @@ protected static void setupIntervalsDeserializer(ObjectDeserializer op) op.add(Builder::fuzzy, IntervalsFuzzy._DESERIALIZER, "fuzzy"); op.add(Builder::match, IntervalsMatch._DESERIALIZER, "match"); op.add(Builder::prefix, IntervalsPrefix._DESERIALIZER, "prefix"); + op.add(Builder::range, IntervalsRange._DESERIALIZER, "range"); + op.add(Builder::regexp, IntervalsRegexp._DESERIALIZER, "regexp"); op.add(Builder::wildcard, IntervalsWildcard._DESERIALIZER, "wildcard"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsBuilders.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsBuilders.java index 7904ee5e3..c56487b32 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsBuilders.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsBuilders.java @@ -134,6 +134,42 @@ public static Intervals prefix(Function> fn) { + Intervals.Builder builder = new Intervals.Builder(); + builder.range(fn.apply(new IntervalsRange.Builder()).build()); + return builder.build(); + } + + /** + * Creates a builder for the {@link IntervalsRegexp regexp} {@code Intervals} + * variant. 
+ */ + public static IntervalsRegexp.Builder regexp() { + return new IntervalsRegexp.Builder(); + } + + /** + * Creates a Intervals of the {@link IntervalsRegexp regexp} {@code Intervals} + * variant. + */ + public static Intervals regexp(Function> fn) { + Intervals.Builder builder = new Intervals.Builder(); + builder.regexp(fn.apply(new IntervalsRegexp.Builder()).build()); + return builder.build(); + } + /** * Creates a builder for the {@link IntervalsWildcard wildcard} * {@code Intervals} variant. diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsQuery.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsQuery.java index 837e178a7..4cee275d8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsQuery.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsQuery.java @@ -87,6 +87,10 @@ public enum Kind implements JsonEnum { Prefix("prefix"), + Range("range"), + + Regexp("regexp"), + Wildcard("wildcard"), ; @@ -232,6 +236,40 @@ public IntervalsPrefix prefix() { return TaggedUnionUtils.get(this, Kind.Prefix); } + /** + * Is this variant instance of kind {@code range}? + */ + public boolean isRange() { + return _kind == Kind.Range; + } + + /** + * Get the {@code range} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code range} kind. + */ + public IntervalsRange range() { + return TaggedUnionUtils.get(this, Kind.Range); + } + + /** + * Is this variant instance of kind {@code regexp}? + */ + public boolean isRegexp() { + return _kind == Kind.Regexp; + } + + /** + * Get the {@code regexp} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code regexp} kind. + */ + public IntervalsRegexp regexp() { + return TaggedUnionUtils.get(this, Kind.Regexp); + } + /** * Is this variant instance of kind {@code wildcard}? 
*/ @@ -343,6 +381,27 @@ public ObjectBuilder prefix( return this.prefix(fn.apply(new IntervalsPrefix.Builder()).build()); } + public ObjectBuilder range(IntervalsRange v) { + this._kind = Kind.Range; + this._value = v; + return this; + } + + public ObjectBuilder range(Function> fn) { + return this.range(fn.apply(new IntervalsRange.Builder()).build()); + } + + public ObjectBuilder regexp(IntervalsRegexp v) { + this._kind = Kind.Regexp; + this._value = v; + return this; + } + + public ObjectBuilder regexp( + Function> fn) { + return this.regexp(fn.apply(new IntervalsRegexp.Builder()).build()); + } + public ObjectBuilder wildcard(IntervalsWildcard v) { this._kind = Kind.Wildcard; this._value = v; @@ -368,6 +427,8 @@ protected static void setupIntervalsQueryDeserializer(ObjectDeserializer> fn) { + IntervalsQuery.Builder builder = new IntervalsQuery.Builder(); + builder.range(fn.apply(new IntervalsRange.Builder()).build()); + return builder.build(); + } + + /** + * Creates a builder for the {@link IntervalsRegexp regexp} + * {@code IntervalsQuery} variant. + */ + public static IntervalsRegexp.Builder regexp() { + return new IntervalsRegexp.Builder(); + } + + /** + * Creates a IntervalsQuery of the {@link IntervalsRegexp regexp} + * {@code IntervalsQuery} variant. + */ + public static IntervalsQuery regexp(Function> fn) { + IntervalsQuery.Builder builder = new IntervalsQuery.Builder(); + builder.regexp(fn.apply(new IntervalsRegexp.Builder()).build()); + return builder.build(); + } + /** * Creates a builder for the {@link IntervalsWildcard wildcard} * {@code IntervalsQuery} variant. 
diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsRange.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsRange.java new file mode 100644 index 000000000..66d6c5534 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsRange.java @@ -0,0 +1,348 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch._types.query_dsl; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. 
MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: _types.query_dsl.IntervalsRange + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class IntervalsRange implements IntervalsQueryVariant, IntervalsVariant, JsonpSerializable { + @Nullable + private final String analyzer; + + @Nullable + private final String gte; + + @Nullable + private final String gt; + + @Nullable + private final String lte; + + @Nullable + private final String lt; + + @Nullable + private final String useField; + + // --------------------------------------------------------------------------------------------- + + private IntervalsRange(Builder builder) { + + this.analyzer = builder.analyzer; + this.gte = builder.gte; + this.gt = builder.gt; + this.lte = builder.lte; + this.lt = builder.lt; + this.useField = builder.useField; + + } + + public static IntervalsRange of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * IntervalsQuery variant kind. + */ + @Override + public IntervalsQuery.Kind _intervalsQueryKind() { + return IntervalsQuery.Kind.Range; + } + + /** + * Intervals variant kind. + */ + @Override + public Intervals.Kind _intervalsKind() { + return Intervals.Kind.Range; + } + + /** + * Analyzer used to analyze the prefix. + *

    + * API name: {@code analyzer} + */ + @Nullable + public final String analyzer() { + return this.analyzer; + } + + /** + * Lower term, either gte or gt must be provided. + *

    + * API name: {@code gte} + */ + @Nullable + public final String gte() { + return this.gte; + } + + /** + * Lower term, either gte or gt must be provided. + *

    + * API name: {@code gt} + */ + @Nullable + public final String gt() { + return this.gt; + } + + /** + * Upper term, either lte or lt must be provided. + *

    + * API name: {@code lte} + */ + @Nullable + public final String lte() { + return this.lte; + } + + /** + * Upper term, either lte or lt must be provided. + *

    + * API name: {@code lt} + */ + @Nullable + public final String lt() { + return this.lt; + } + + /** + * If specified, match intervals from this field rather than the top-level + * field. The prefix is normalized using the search analyzer from + * this field, unless analyzer is specified separately. + *

    + * API name: {@code use_field} + */ + @Nullable + public final String useField() { + return this.useField; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.analyzer != null) { + generator.writeKey("analyzer"); + generator.write(this.analyzer); + + } + if (this.gte != null) { + generator.writeKey("gte"); + generator.write(this.gte); + + } + if (this.gt != null) { + generator.writeKey("gt"); + generator.write(this.gt); + + } + if (this.lte != null) { + generator.writeKey("lte"); + generator.write(this.lte); + + } + if (this.lt != null) { + generator.writeKey("lt"); + generator.write(this.lt); + + } + if (this.useField != null) { + generator.writeKey("use_field"); + generator.write(this.useField); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link IntervalsRange}. + */ + + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + @Nullable + private String analyzer; + + @Nullable + private String gte; + + @Nullable + private String gt; + + @Nullable + private String lte; + + @Nullable + private String lt; + + @Nullable + private String useField; + + /** + * Analyzer used to analyze the prefix. + *

    + * API name: {@code analyzer} + */ + public final Builder analyzer(@Nullable String value) { + this.analyzer = value; + return this; + } + + /** + * Lower term, either gte or gt must be provided. + *

    + * API name: {@code gte} + */ + public final Builder gte(@Nullable String value) { + this.gte = value; + return this; + } + + /** + * Lower term, either gte or gt must be provided. + *

    + * API name: {@code gt} + */ + public final Builder gt(@Nullable String value) { + this.gt = value; + return this; + } + + /** + * Upper term, either lte or lt must be provided. + *

    + * API name: {@code lte} + */ + public final Builder lte(@Nullable String value) { + this.lte = value; + return this; + } + + /** + * Upper term, either lte or lt must be provided. + *

    + * API name: {@code lt} + */ + public final Builder lt(@Nullable String value) { + this.lt = value; + return this; + } + + /** + * If specified, match intervals from this field rather than the top-level + * field. The prefix is normalized using the search analyzer from + * this field, unless analyzer is specified separately. + *

    + * API name: {@code use_field} + */ + public final Builder useField(@Nullable String value) { + this.useField = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link IntervalsRange}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public IntervalsRange build() { + _checkSingleUse(); + + return new IntervalsRange(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link IntervalsRange} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + IntervalsRange::setupIntervalsRangeDeserializer); + + protected static void setupIntervalsRangeDeserializer(ObjectDeserializer op) { + + op.add(Builder::analyzer, JsonpDeserializer.stringDeserializer(), "analyzer"); + op.add(Builder::gte, JsonpDeserializer.stringDeserializer(), "gte"); + op.add(Builder::gt, JsonpDeserializer.stringDeserializer(), "gt"); + op.add(Builder::lte, JsonpDeserializer.stringDeserializer(), "lte"); + op.add(Builder::lt, JsonpDeserializer.stringDeserializer(), "lt"); + op.add(Builder::useField, JsonpDeserializer.stringDeserializer(), "use_field"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsRegexp.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsRegexp.java new file mode 100644 index 000000000..9b825b60b --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/IntervalsRegexp.java @@ -0,0 +1,245 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch._types.query_dsl; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: _types.query_dsl.IntervalsRegexp + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class IntervalsRegexp implements IntervalsQueryVariant, IntervalsVariant, JsonpSerializable { + @Nullable + private final String analyzer; + + private final String pattern; + + @Nullable + private final String useField; + + // --------------------------------------------------------------------------------------------- + + private IntervalsRegexp(Builder builder) { + + this.analyzer = builder.analyzer; + this.pattern = ApiTypeHelper.requireNonNull(builder.pattern, this, "pattern"); + this.useField = builder.useField; + + } + + public static IntervalsRegexp of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * IntervalsQuery variant kind. + */ + @Override + public IntervalsQuery.Kind _intervalsQueryKind() { + return IntervalsQuery.Kind.Regexp; + } + + /** + * Intervals variant kind. + */ + @Override + public Intervals.Kind _intervalsKind() { + return Intervals.Kind.Regexp; + } + + /** + * Analyzer used to analyze the prefix. + *

    + * API name: {@code analyzer} + */ + @Nullable + public final String analyzer() { + return this.analyzer; + } + + /** + * Required - Regex pattern. + *

    + * API name: {@code pattern} + */ + public final String pattern() { + return this.pattern; + } + + /** + * If specified, match intervals from this field rather than the top-level + * field. The prefix is normalized using the search analyzer from + * this field, unless analyzer is specified separately. + *

    + * API name: {@code use_field} + */ + @Nullable + public final String useField() { + return this.useField; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.analyzer != null) { + generator.writeKey("analyzer"); + generator.write(this.analyzer); + + } + generator.writeKey("pattern"); + generator.write(this.pattern); + + if (this.useField != null) { + generator.writeKey("use_field"); + generator.write(this.useField); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link IntervalsRegexp}. + */ + + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + @Nullable + private String analyzer; + + private String pattern; + + @Nullable + private String useField; + + /** + * Analyzer used to analyze the prefix. + *

    + * API name: {@code analyzer} + */ + public final Builder analyzer(@Nullable String value) { + this.analyzer = value; + return this; + } + + /** + * Required - Regex pattern. + *

    + * API name: {@code pattern} + */ + public final Builder pattern(String value) { + this.pattern = value; + return this; + } + + /** + * If specified, match intervals from this field rather than the top-level + * field. The prefix is normalized using the search analyzer from + * this field, unless analyzer is specified separately. + *

    + * API name: {@code use_field} + */ + public final Builder useField(@Nullable String value) { + this.useField = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link IntervalsRegexp}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public IntervalsRegexp build() { + _checkSingleUse(); + + return new IntervalsRegexp(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link IntervalsRegexp} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + IntervalsRegexp::setupIntervalsRegexpDeserializer); + + protected static void setupIntervalsRegexpDeserializer(ObjectDeserializer op) { + + op.add(Builder::analyzer, JsonpDeserializer.stringDeserializer(), "analyzer"); + op.add(Builder::pattern, JsonpDeserializer.stringDeserializer(), "pattern"); + op.add(Builder::useField, JsonpDeserializer.stringDeserializer(), "use_field"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/RuleQuery.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/RuleQuery.java index 655842172..b3080b2d0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/RuleQuery.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/RuleQuery.java @@ -62,6 +62,9 @@ public class RuleQuery extends QueryBase implements QueryVariant { private final List rulesetIds; + @Nullable + private final String rulesetId; + private final JsonData matchCriteria; // --------------------------------------------------------------------------------------------- @@ -70,7 +73,8 @@ private RuleQuery(Builder builder) { super(builder); this.organic = ApiTypeHelper.requireNonNull(builder.organic, this, "organic"); - this.rulesetIds = 
ApiTypeHelper.unmodifiableRequired(builder.rulesetIds, this, "rulesetIds"); + this.rulesetIds = ApiTypeHelper.unmodifiable(builder.rulesetIds); + this.rulesetId = builder.rulesetId; this.matchCriteria = ApiTypeHelper.requireNonNull(builder.matchCriteria, this, "matchCriteria"); } @@ -95,12 +99,20 @@ public final Query organic() { } /** - * Required - API name: {@code ruleset_ids} + * API name: {@code ruleset_ids} */ public final List rulesetIds() { return this.rulesetIds; } + /** + * API name: {@code ruleset_id} + */ + @Nullable + public final String rulesetId() { + return this.rulesetId; + } + /** * Required - API name: {@code match_criteria} */ @@ -123,6 +135,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { } generator.writeEnd(); + } + if (this.rulesetId != null) { + generator.writeKey("ruleset_id"); + generator.write(this.rulesetId); + } generator.writeKey("match_criteria"); this.matchCriteria.serialize(generator, mapper); @@ -138,8 +155,12 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { public static class Builder extends QueryBase.AbstractBuilder implements ObjectBuilder { private Query organic; + @Nullable private List rulesetIds; + @Nullable + private String rulesetId; + private JsonData matchCriteria; /** @@ -166,7 +187,7 @@ public final Builder organic(QueryVariant value) { } /** - * Required - API name: {@code ruleset_ids} + * API name: {@code ruleset_ids} *

    * Adds all elements of list to rulesetIds. */ @@ -176,7 +197,7 @@ public final Builder rulesetIds(List list) { } /** - * Required - API name: {@code ruleset_ids} + * API name: {@code ruleset_ids} *

    * Adds one or more values to rulesetIds. */ @@ -185,6 +206,14 @@ public final Builder rulesetIds(String value, String... values) { return this; } + /** + * API name: {@code ruleset_id} + */ + public final Builder rulesetId(@Nullable String value) { + this.rulesetId = value; + return this; + } + /** * Required - API name: {@code match_criteria} */ @@ -224,6 +253,7 @@ protected static void setupRuleQueryDeserializer(ObjectDeserializer tokens; + private final List> tokens; @Nullable private final TokenPruningConfig pruningConfig; @@ -102,7 +103,7 @@ public final String field() { *

    * API name: {@code tokens} */ - public final Map tokens() { + public final List> tokens() { return this.tokens; } @@ -122,10 +123,17 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { super.serializeInternal(generator, mapper); if (ApiTypeHelper.isDefined(this.tokens)) { generator.writeKey("tokens"); - generator.writeStartObject(); - for (Map.Entry item0 : this.tokens.entrySet()) { - generator.writeKey(item0.getKey()); - generator.write(item0.getValue()); + generator.writeStartArray(); + for (Map item0 : this.tokens) { + generator.writeStartObject(); + if (item0 != null) { + for (Map.Entry item1 : item0.entrySet()) { + generator.writeKey(item1.getKey()); + generator.write(item1.getValue()); + + } + } + generator.writeEnd(); } generator.writeEnd(); @@ -160,7 +168,7 @@ public final Builder field(String value) { return this; } - private Map tokens; + private List> tokens; @Nullable private TokenPruningConfig pruningConfig; @@ -170,10 +178,10 @@ public final Builder field(String value) { *

    * API name: {@code tokens} *

    - * Adds all entries of map to tokens. + * Adds all elements of list to tokens. */ - public final Builder tokens(Map map) { - this.tokens = _mapPutAll(this.tokens, map); + public final Builder tokens(List> list) { + this.tokens = _listAddAll(this.tokens, list); return this; } @@ -182,10 +190,10 @@ public final Builder tokens(Map map) { *

    * API name: {@code tokens} *

    - * Adds an entry to tokens. + * Adds one or more values to tokens. */ - public final Builder tokens(String key, Float value) { - this.tokens = _mapPut(this.tokens, key, value); + public final Builder tokens(Map value, Map... values) { + this.tokens = _listAdd(this.tokens, value, values); return this; } @@ -236,8 +244,8 @@ public WeightedTokensQuery build() { protected static void setupWeightedTokensQueryDeserializer(ObjectDeserializer op) { QueryBase.setupQueryBaseDeserializer(op); - op.add(Builder::tokens, JsonpDeserializer.stringMapDeserializer(JsonpDeserializer.floatDeserializer()), - "tokens"); + op.add(Builder::tokens, JsonpDeserializer.arrayDeserializer( + JsonpDeserializer.stringMapDeserializer(JsonpDeserializer.floatDeserializer())), "tokens"); op.add(Builder::pruningConfig, TokenPruningConfig._DESERIALIZER, "pruning_config"); op.setKey(Builder::field, JsonpDeserializer.stringDeserializer()); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/AllocationExplainRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/AllocationExplainRequest.java index 4b0ef194d..af0935611 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/AllocationExplainRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/AllocationExplainRequest.java @@ -66,7 +66,8 @@ * shard is remaining on its current node and has not moved or rebalanced to * another node. This API can be very useful when attempting to diagnose why a * shard is unassigned or why a shard continues to remain on its current node - * when you might expect otherwise. + * when you might expect otherwise. Refer to the linked documentation for + * examples of how to troubleshoot allocation issues using this API. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ComponentTemplateSummary.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ComponentTemplateSummary.java index e9ab92f61..047a99518 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ComponentTemplateSummary.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ComponentTemplateSummary.java @@ -22,6 +22,7 @@ import co.elastic.clients.elasticsearch._types.mapping.TypeMapping; import co.elastic.clients.elasticsearch.indices.AliasDefinition; import co.elastic.clients.elasticsearch.indices.DataStreamLifecycleWithRollover; +import co.elastic.clients.elasticsearch.indices.DataStreamOptionsTemplate; import co.elastic.clients.elasticsearch.indices.IndexSettings; import co.elastic.clients.json.JsonData; import co.elastic.clients.json.JsonpDeserializable; @@ -82,6 +83,9 @@ public class ComponentTemplateSummary implements JsonpSerializable { @Nullable private final DataStreamLifecycleWithRollover lifecycle; + @Nullable + private final DataStreamOptionsTemplate dataStreamOptions; + // --------------------------------------------------------------------------------------------- private ComponentTemplateSummary(Builder builder) { @@ -92,6 +96,7 @@ private ComponentTemplateSummary(Builder builder) { this.mappings = builder.mappings; this.aliases = ApiTypeHelper.unmodifiable(builder.aliases); this.lifecycle = builder.lifecycle; + this.dataStreamOptions = builder.dataStreamOptions; } @@ -144,6 +149,14 @@ public final DataStreamLifecycleWithRollover lifecycle() { return this.lifecycle; } + /** + * API name: {@code data_stream_options} + */ + @Nullable + public final DataStreamOptionsTemplate dataStreamOptions() { + return this.dataStreamOptions; + } + /** * Serialize this object to JSON. 
*/ @@ -203,6 +216,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { this.lifecycle.serialize(generator, mapper); } + if (this.dataStreamOptions != null) { + generator.writeKey("data_stream_options"); + this.dataStreamOptions.serialize(generator, mapper); + + } } @@ -238,6 +256,9 @@ public static class Builder extends WithJsonObjectBuilderBase @Nullable private DataStreamLifecycleWithRollover lifecycle; + @Nullable + private DataStreamOptionsTemplate dataStreamOptions; + /** * API name: {@code _meta} *

    @@ -355,6 +376,22 @@ public final Builder lifecycle( return this.lifecycle(fn.apply(new DataStreamLifecycleWithRollover.Builder()).build()); } + /** + * API name: {@code data_stream_options} + */ + public final Builder dataStreamOptions(@Nullable DataStreamOptionsTemplate value) { + this.dataStreamOptions = value; + return this; + } + + /** + * API name: {@code data_stream_options} + */ + public final Builder dataStreamOptions( + Function> fn) { + return this.dataStreamOptions(fn.apply(new DataStreamOptionsTemplate.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -390,6 +427,7 @@ protected static void setupComponentTemplateSummaryDeserializer( op.add(Builder::mappings, TypeMapping._DESERIALIZER, "mappings"); op.add(Builder::aliases, JsonpDeserializer.stringMapDeserializer(AliasDefinition._DESERIALIZER), "aliases"); op.add(Builder::lifecycle, DataStreamLifecycleWithRollover._DESERIALIZER, "lifecycle"); + op.add(Builder::dataStreamOptions, DataStreamOptionsTemplate._DESERIALIZER, "data_stream_options"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java index eeadc9356..410c61e96 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java @@ -77,7 +77,8 @@ public ElasticsearchClusterAsyncClient withTransportOptions(@Nullable TransportO * shard is remaining on its current node and has not moved or rebalanced to * another node. This API can be very useful when attempting to diagnose why a * shard is unassigned or why a shard continues to remain on its current node - * when you might expect otherwise. + * when you might expect otherwise. 
Refer to the linked documentation for + * examples of how to troubleshoot allocation issues using this API. * * @see Documentation @@ -98,7 +99,8 @@ public CompletableFuture allocationExplain(Allocation * shard is remaining on its current node and has not moved or rebalanced to * another node. This API can be very useful when attempting to diagnose why a * shard is unassigned or why a shard continues to remain on its current node - * when you might expect otherwise. + * when you might expect otherwise. Refer to the linked documentation for + * examples of how to troubleshoot allocation issues using this API. * * @param fn * a function that initializes a builder to create the @@ -120,7 +122,8 @@ public final CompletableFuture allocationExplain( * shard is remaining on its current node and has not moved or rebalanced to * another node. This API can be very useful when attempting to diagnose why a * shard is unassigned or why a shard continues to remain on its current node - * when you might expect otherwise. + * when you might expect otherwise. Refer to the linked documentation for + * examples of how to troubleshoot allocation issues using this API. * * @see Documentation @@ -304,8 +307,9 @@ public CompletableFuture getComponentTemplate() { // ----- Endpoint: cluster.get_settings /** - * Get cluster-wide settings. By default, it returns only settings that have - * been explicitly defined. + * Get cluster-wide settings. + *

    + * By default, it returns only settings that have been explicitly defined. * * @see Documentation @@ -320,8 +324,9 @@ public CompletableFuture getSettings(GetClusterSetti } /** - * Get cluster-wide settings. By default, it returns only settings that have - * been explicitly defined. + * Get cluster-wide settings. + *

    + * By default, it returns only settings that have been explicitly defined. * * @param fn * a function that initializes a builder to create the @@ -337,8 +342,9 @@ public final CompletableFuture getSettings( } /** - * Get cluster-wide settings. By default, it returns only settings that have - * been explicitly defined. + * Get cluster-wide settings. + *

    + * By default, it returns only settings that have been explicitly defined. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java index 73fb4bfad..2083dcc45 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java @@ -75,7 +75,8 @@ public ElasticsearchClusterClient withTransportOptions(@Nullable TransportOption * shard is remaining on its current node and has not moved or rebalanced to * another node. This API can be very useful when attempting to diagnose why a * shard is unassigned or why a shard continues to remain on its current node - * when you might expect otherwise. + * when you might expect otherwise. Refer to the linked documentation for + * examples of how to troubleshoot allocation issues using this API. * * @see Documentation @@ -97,7 +98,8 @@ public AllocationExplainResponse allocationExplain(AllocationExplainRequest requ * shard is remaining on its current node and has not moved or rebalanced to * another node. This API can be very useful when attempting to diagnose why a * shard is unassigned or why a shard continues to remain on its current node - * when you might expect otherwise. + * when you might expect otherwise. Refer to the linked documentation for + * examples of how to troubleshoot allocation issues using this API. * * @param fn * a function that initializes a builder to create the @@ -120,7 +122,8 @@ public final AllocationExplainResponse allocationExplain( * shard is remaining on its current node and has not moved or rebalanced to * another node. 
This API can be very useful when attempting to diagnose why a * shard is unassigned or why a shard continues to remain on its current node - * when you might expect otherwise. + * when you might expect otherwise. Refer to the linked documentation for + * examples of how to troubleshoot allocation issues using this API. * * @see Documentation @@ -310,8 +313,9 @@ public GetComponentTemplateResponse getComponentTemplate() throws IOException, E // ----- Endpoint: cluster.get_settings /** - * Get cluster-wide settings. By default, it returns only settings that have - * been explicitly defined. + * Get cluster-wide settings. + *

    + * By default, it returns only settings that have been explicitly defined. * * @see Documentation @@ -327,8 +331,9 @@ public GetClusterSettingsResponse getSettings(GetClusterSettingsRequest request) } /** - * Get cluster-wide settings. By default, it returns only settings that have - * been explicitly defined. + * Get cluster-wide settings. + *

    + * By default, it returns only settings that have been explicitly defined. * * @param fn * a function that initializes a builder to create the @@ -345,8 +350,9 @@ public final GetClusterSettingsResponse getSettings( } /** - * Get cluster-wide settings. By default, it returns only settings that have - * been explicitly defined. + * Get cluster-wide settings. + *

    + * By default, it returns only settings that have been explicitly defined. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetClusterSettingsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetClusterSettingsRequest.java index 337291f80..e36b69cf6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetClusterSettingsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetClusterSettingsRequest.java @@ -56,8 +56,9 @@ // typedef: cluster.get_settings.Request /** - * Get cluster-wide settings. By default, it returns only settings that have - * been explicitly defined. + * Get cluster-wide settings. + *

    + * By default, it returns only settings that have been explicitly defined. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetClusterSettingsResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetClusterSettingsResponse.java index f2a895fad..9237a2d46 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetClusterSettingsResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetClusterSettingsResponse.java @@ -82,20 +82,26 @@ public static GetClusterSettingsResponse of(Function + * API name: {@code persistent} */ public final Map persistent() { return this.persistent; } /** - * Required - API name: {@code transient} + * Required - The settings that do not persist after the cluster restarts. + *

    + * API name: {@code transient} */ public final Map transient_() { return this.transient_; } /** + * The default setting values. + *

    * API name: {@code defaults} */ public final Map defaults() { @@ -171,7 +177,9 @@ public static class Builder extends WithJsonObjectBuilderBase private Map defaults; /** - * Required - API name: {@code persistent} + * Required - The settings that persist after the cluster restarts. + *

    + * API name: {@code persistent} *

    * Adds all entries of map to persistent. */ @@ -181,7 +189,9 @@ public final Builder persistent(Map map) { } /** - * Required - API name: {@code persistent} + * Required - The settings that persist after the cluster restarts. + *

    + * API name: {@code persistent} *

    * Adds an entry to persistent. */ @@ -191,7 +201,9 @@ public final Builder persistent(String key, JsonData value) { } /** - * Required - API name: {@code transient} + * Required - The settings that do not persist after the cluster restarts. + *

    + * API name: {@code transient} *

    * Adds all entries of map to transient_. */ @@ -201,7 +213,9 @@ public final Builder transient_(Map map) { } /** - * Required - API name: {@code transient} + * Required - The settings that do not persist after the cluster restarts. + *

    + * API name: {@code transient} *

    * Adds an entry to transient_. */ @@ -211,6 +225,8 @@ public final Builder transient_(String key, JsonData value) { } /** + * The default setting values. + *

    * API name: {@code defaults} *

    * Adds all entries of map to defaults. @@ -221,6 +237,8 @@ public final Builder defaults(Map map) { } /** + * The default setting values. + *

    * API name: {@code defaults} *

    * Adds an entry to defaults. diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutClusterSettingsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutClusterSettingsRequest.java index 956e8dc83..b733b6714 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutClusterSettingsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutClusterSettingsRequest.java @@ -146,6 +146,8 @@ public final Time masterTimeout() { } /** + * The settings that persist after the cluster restarts. + *

    * API name: {@code persistent} */ public final Map persistent() { @@ -163,6 +165,8 @@ public final Time timeout() { } /** + * The settings that do not persist after the cluster restarts. + *

    * API name: {@code transient} */ public final Map transient_() { @@ -259,6 +263,8 @@ public final Builder masterTimeout(Function> f } /** + * The settings that persist after the cluster restarts. + *

    * API name: {@code persistent} *

    * Adds all entries of map to persistent. @@ -269,6 +275,8 @@ public final Builder persistent(Map map) { } /** + * The settings that persist after the cluster restarts. + *

    * API name: {@code persistent} *

    * Adds an entry to persistent. @@ -298,6 +306,8 @@ public final Builder timeout(Function> fn) { } /** + * The settings that do not persist after the cluster restarts. + *

    * API name: {@code transient} *

    * Adds all entries of map to transient_. @@ -308,6 +318,8 @@ public final Builder transient_(Map map) { } /** + * The settings that do not persist after the cluster restarts. + *

    * API name: {@code transient} *

    * Adds an entry to transient_. diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkRequest.java index 08e4e2fc1..21df7c2eb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkRequest.java @@ -228,6 +228,10 @@ * five shards. The request will only wait for those three shards to refresh. * The other two shards that make up the index do not participate in the * _bulk request at all. + *

    + * You might want to disable the refresh interval temporarily to improve + * indexing throughput for large bulk requests. Refer to the linked + * documentation for step-by-step instructions using the index settings API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetRequest.java index 33e0e492a..3166a9784 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/GetRequest.java @@ -235,7 +235,7 @@ public final List sourceIncludes() { /** * Indicates whether the request forces synthetic _source. Use this - * paramater to test if the mapping supports synthetic _source and + * parameter to test if the mapping supports synthetic _source and * to get a sense of the worst case performance. Fetches with this parameter * enabled will be slower than enabling synthetic source natively in the index. *

    @@ -320,7 +320,7 @@ public final String routing() { * fields are specified, no stored fields are included in the response. If this * field is specified, the _source parameter defaults to * false. Only leaf fields can be retrieved with the - * stored_field option. Object fields can't be returned;​if + * stored_fields option. Object fields can't be returned; if * specified, the request fails. *

    * API name: {@code stored_fields} @@ -479,7 +479,7 @@ public final Builder sourceIncludes(String value, String... values) { /** * Indicates whether the request forces synthetic _source. Use this - * paramater to test if the mapping supports synthetic _source and + * parameter to test if the mapping supports synthetic _source and * to get a sense of the worst case performance. Fetches with this parameter * enabled will be slower than enabling synthetic source natively in the index. *

    @@ -566,7 +566,7 @@ public final Builder routing(@Nullable String value) { * fields are specified, no stored fields are included in the response. If this * field is specified, the _source parameter defaults to * false. Only leaf fields can be retrieved with the - * stored_field option. Object fields can't be returned;​if + * stored_fields option. Object fields can't be returned; if * specified, the request fails. *

    * API name: {@code stored_fields} @@ -583,7 +583,7 @@ public final Builder storedFields(List list) { * fields are specified, no stored fields are included in the response. If this * field is specified, the _source parameter defaults to * false. Only leaf fields can be retrieved with the - * stored_field option. Object fields can't be returned;​if + * stored_fields option. Object fields can't be returned; if * specified, the request fails. *

    * API name: {@code stored_fields} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/OpenPointInTimeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/OpenPointInTimeRequest.java index e3e5bf978..07e9867e6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/OpenPointInTimeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/OpenPointInTimeRequest.java @@ -190,9 +190,7 @@ public final Boolean allowPartialSearchResults() { * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. It supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -344,9 +342,7 @@ public final Builder allowPartialSearchResults(@Nullable Boolean value) { * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. It supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -361,9 +357,7 @@ public final Builder expandWildcards(List list) { * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. It supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexRequest.java index 61bc78753..095d75239 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/ReindexRequest.java @@ -131,210 +131,7 @@ * until it has successfully indexed max_docs documents into the * target or it has gone through every document in the source query. *

    - * NOTE: The reindex API makes no effort to handle ID collisions. The last - * document written will "win" but the order isn't usually predictable - * so it is not a good idea to rely on this behavior. Instead, make sure that - * IDs are unique by using a script. - *

    - * Running reindex asynchronously - *

    - * If the request contains wait_for_completion=false, Elasticsearch - * performs some preflight checks, launches the request, and returns a task you - * can use to cancel or get the status of the task. Elasticsearch creates a - * record of this task as a document at _tasks/<task_id>. - *

    - * Reindex from multiple sources - *

    - * If you have many sources to reindex it is generally better to reindex them - * one at a time rather than using a glob pattern to pick up multiple sources. - * That way you can resume the process if there are any errors by removing the - * partially completed source and starting over. It also makes parallelizing the - * process fairly simple: split the list of sources to reindex and run each list - * in parallel. - *

    - * For example, you can use a bash script like this: - * - *

    - * for index in i1 i2 i3 i4 i5; do
    - *   curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
    - *     "source": {
    - *       "index": "'$index'"
    - *     },
    - *     "dest": {
    - *       "index": "'$index'-reindexed"
    - *     }
    - *   }'
    - * done
    - * 
    - * 
    - *

    - * Throttling - *

    - * Set requests_per_second to any positive decimal number - * (1.4, 6, 1000, for example) to - * throttle the rate at which reindex issues batches of index operations. - * Requests are throttled by padding each batch with a wait time. To turn off - * throttling, set requests_per_second to -1. - *

    - * The throttling is done by waiting between batches so that the scroll that - * reindex uses internally can be given a timeout that takes into account the - * padding. The padding time is the difference between the batch size divided by - * the requests_per_second and the time spent writing. By default - * the batch size is 1000, so if requests_per_second - * is set to 500: - * - *

    - * target_time = 1000 / 500 per second = 2 seconds
    - * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
    - * 
    - * 
    - *

    - * Since the batch is issued as a single bulk request, large batch sizes cause - * Elasticsearch to create many requests and then wait for a while before - * starting the next set. This is "bursty" instead of - * "smooth". - *

    - * Slicing - *

    - * Reindex supports sliced scroll to parallelize the reindexing process. This - * parallelization can improve efficiency and provide a convenient way to break - * the request down into smaller parts. - *

    - * NOTE: Reindexing from remote clusters does not support manual or automatic - * slicing. - *

    - * You can slice a reindex request manually by providing a slice ID and total - * number of slices to each request. You can also let reindex automatically - * parallelize by using sliced scroll to slice on _id. The - * slices parameter specifies the number of slices to use. - *

    - * Adding slices to the reindex request just automates the manual - * process, creating sub-requests which means it has some quirks: - *

      - *
    • You can see these requests in the tasks API. These sub-requests are - * "child" tasks of the task for the request with slices.
    • - *
    • Fetching the status of the task for the request with slices - * only contains the status of completed slices.
    • - *
    • These sub-requests are individually addressable for things like - * cancellation and rethrottling.
    • - *
    • Rethrottling the request with slices will rethrottle the - * unfinished sub-request proportionally.
    • - *
    • Canceling the request with slices will cancel each - * sub-request.
    • - *
    • Due to the nature of slices, each sub-request won't get a - * perfectly even portion of the documents. All documents will be addressed, but - * some slices may be larger than others. Expect larger slices to have a more - * even distribution.
    • - *
    • Parameters like requests_per_second and - * max_docs on a request with slices are distributed - * proportionally to each sub-request. Combine that with the previous point - * about distribution being uneven and you should conclude that using - * max_docs with slices might not result in exactly - * max_docs documents being reindexed.
    • - *
    • Each sub-request gets a slightly different snapshot of the source, though - * these are all taken at approximately the same time.
    • - *
    - *

    - * If slicing automatically, setting slices to auto - * will choose a reasonable number for most indices. If slicing manually or - * otherwise tuning automatic slicing, use the following guidelines. - *

    - * Query performance is most efficient when the number of slices is equal to the - * number of shards in the index. If that number is large (for example, - * 500), choose a lower number as too many slices will hurt - * performance. Setting slices higher than the number of shards generally does - * not improve efficiency and adds overhead. - *

    - * Indexing performance scales linearly across available resources with the - * number of slices. - *

    - * Whether query or indexing performance dominates the runtime depends on the - * documents being reindexed and cluster resources. - *

    - * Modify documents during reindexing - *

    - * Like _update_by_query, reindex operations support a script that - * modifies the document. Unlike _update_by_query, the script is - * allowed to modify the document's metadata. - *

    - * Just as in _update_by_query, you can set ctx.op to - * change the operation that is run on the destination. For example, set - * ctx.op to noop if your script decides that the - * document doesn’t have to be indexed in the destination. This "no - * operation" will be reported in the noop counter in the - * response body. Set ctx.op to delete if your script - * decides that the document must be deleted from the destination. The deletion - * will be reported in the deleted counter in the response body. - * Setting ctx.op to anything else will return an error, as will - * setting any other field in ctx. - *

    - * Think of the possibilities! Just be careful; you are able to change: - *

      - *
    • _id
    • - *
    • _index
    • - *
    • _version
    • - *
    • _routing
    • - *
    - *

    - * Setting _version to null or clearing it from the - * ctx map is just like not sending the version in an indexing - * request. It will cause the document to be overwritten in the destination - * regardless of the version on the target or the version type you use in the - * reindex API. - *

    - * Reindex from remote - *

    - * Reindex supports reindexing from a remote Elasticsearch cluster. The - * host parameter must contain a scheme, host, port, and optional - * path. The username and password parameters are - * optional and when they are present the reindex operation will connect to the - * remote Elasticsearch node using basic authentication. Be sure to use HTTPS - * when using basic authentication or the password will be sent in plain text. - * There are a range of settings available to configure the behavior of the - * HTTPS connection. - *

    - * When using Elastic Cloud, it is also possible to authenticate against the - * remote cluster through the use of a valid API key. Remote hosts must be - * explicitly allowed with the reindex.remote.whitelist setting. It - * can be set to a comma delimited list of allowed remote host and port - * combinations. Scheme is ignored; only the host and port are used. For - * example: - * - *

    - * reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"]
    - * 
    - * 
    - *

    - * The list of allowed hosts must be configured on any nodes that will - * coordinate the reindex. This feature should work with remote clusters of any - * version of Elasticsearch. This should enable you to upgrade from any version - * of Elasticsearch to the current version by reindexing from a cluster of the - * old version. - *

    - * WARNING: Elasticsearch does not support forward compatibility across major - * versions. For example, you cannot reindex from a 7.x cluster into a 6.x - * cluster. - *

    - * To enable queries sent to older versions of Elasticsearch, the - * query parameter is sent directly to the remote host without - * validation or modification. - *

    - * NOTE: Reindexing from remote clusters does not support manual or automatic - * slicing. - *

    - * Reindexing from a remote server uses an on-heap buffer that defaults to a - * maximum size of 100mb. If the remote index includes very large documents - * you'll need to use a smaller batch size. It is also possible to set the - * socket read timeout on the remote connection with the - * socket_timeout field and the connection timeout with the - * connect_timeout field. Both default to 30 seconds. - *

    - * Configuring SSL parameters - *

    - * Reindex from remote supports configurable SSL settings. These must be - * specified in the elasticsearch.yml file, with the exception of - * the secure settings, which you add in the Elasticsearch keystore. It is not - * possible to configure SSL in the body of the reindex request. + * Refer to the linked documentation for examples of how to reindex documents. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchShardsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchShardsRequest.java index 7814cb784..d0dad28e8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchShardsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchShardsRequest.java @@ -136,9 +136,7 @@ public final Boolean allowNoIndices() { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -265,9 +263,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -282,9 +278,7 @@ public final Builder expandWildcards(List list) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchTemplateRequest.java index 0e7d26309..af6512c78 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchTemplateRequest.java @@ -170,9 +170,7 @@ public final Boolean ccsMinimizeRoundtrips() { * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -441,9 +439,7 @@ public final Builder ccsMinimizeRoundtrips(@Nullable Boolean value) { * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -458,9 +454,7 @@ public final Builder expandWildcards(List list) { * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermvectorsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermvectorsRequest.java index 3c8a1d7a2..8eedd01b8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermvectorsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/TermvectorsRequest.java @@ -119,7 +119,8 @@ * only useful as relative measures whereas the absolute numbers have no meaning * in this context. By default, when requesting term vectors of artificial * documents, a shard to get the statistics from is randomly selected. Use - * routing only to hit a particular shard. + * routing only to hit a particular shard. Refer to the linked + * documentation for detailed examples of how to use this API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryRequest.java index bd1ebd1a8..643cdc0b0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/UpdateByQueryRequest.java @@ -440,9 +440,7 @@ public final String df() { * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. It supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -992,9 +990,7 @@ public final Builder df(@Nullable String value) { * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. It supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -1009,9 +1005,7 @@ public final Builder expandWildcards(List list) { * The type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. It supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/Highlight.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/Highlight.java index ece294b7f..de9fe12d2 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/Highlight.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/Highlight.java @@ -25,10 +25,10 @@ import co.elastic.clients.json.ObjectBuilderDeserializer; import co.elastic.clients.json.ObjectDeserializer; import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.NamedValue; import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; -import java.lang.String; -import java.util.Map; +import java.util.List; import java.util.Objects; import java.util.function.Function; import javax.annotation.Nullable; @@ -61,7 +61,7 @@ public class Highlight extends HighlightBase { @Nullable private final HighlighterEncoder encoder; - private final Map fields; + private final List> fields; // --------------------------------------------------------------------------------------------- @@ -88,7 +88,7 @@ public final HighlighterEncoder encoder() { /** * Required - API name: {@code fields} */ - public final Map fields() { + public final List> fields() { return this.fields; } @@ -101,10 +101,13 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { } if (ApiTypeHelper.isDefined(this.fields)) { generator.writeKey("fields"); - generator.writeStartObject(); - for (Map.Entry item0 : this.fields.entrySet()) { - generator.writeKey(item0.getKey()); - item0.getValue().serialize(generator, mapper); + generator.writeStartArray(); + for (NamedValue item0 : this.fields) { + generator.writeStartObject(); + generator.writeKey(item0.name()); + item0.value().serialize(generator, mapper); + + generator.writeEnd(); } generator.writeEnd(); @@ -123,7 +126,7 @@ public static class Builder extends 
HighlightBase.AbstractBuilder imple @Nullable private HighlighterEncoder encoder; - private Map fields; + private List> fields; /** * API name: {@code encoder} @@ -136,32 +139,23 @@ public final Builder encoder(@Nullable HighlighterEncoder value) { /** * Required - API name: {@code fields} *

    - * Adds all entries of map to fields. + * Adds all elements of list to fields. */ - public final Builder fields(Map map) { - this.fields = _mapPutAll(this.fields, map); + public final Builder fields(List> list) { + this.fields = _listAddAll(this.fields, list); return this; } /** * Required - API name: {@code fields} *

    - * Adds an entry to fields. + * Adds one or more values to fields. */ - public final Builder fields(String key, HighlightField value) { - this.fields = _mapPut(this.fields, key, value); + public final Builder fields(NamedValue value, NamedValue... values) { + this.fields = _listAdd(this.fields, value, values); return this; } - /** - * Required - API name: {@code fields} - *

    - * Adds an entry to fields using a builder lambda. - */ - public final Builder fields(String key, Function> fn) { - return fields(key, fn.apply(new HighlightField.Builder()).build()); - } - @Override protected Builder self() { return this; @@ -191,7 +185,9 @@ public Highlight build() { protected static void setupHighlightDeserializer(ObjectDeserializer op) { HighlightBase.setupHighlightBaseDeserializer(op); op.add(Builder::encoder, HighlighterEncoder._DESERIALIZER, "encoder"); - op.add(Builder::fields, JsonpDeserializer.stringMapDeserializer(HighlightField._DESERIALIZER), "fields"); + op.add(Builder::fields, + JsonpDeserializer.arrayDeserializer(NamedValue.deserializer(() -> HighlightField._DESERIALIZER)), + "fields"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/SourceFilter.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/SourceFilter.java index fedb88c1c..8d661dc63 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/SourceFilter.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/search/SourceFilter.java @@ -30,6 +30,7 @@ import co.elastic.clients.util.ObjectBuilder; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; import java.lang.String; import java.util.List; import java.util.Objects; @@ -61,6 +62,9 @@ */ @JsonpDeserializable public class SourceFilter implements JsonpSerializable { + @Nullable + private final Boolean excludeVectors; + private final List excludes; private final List includes; @@ -69,6 +73,7 @@ public class SourceFilter implements JsonpSerializable { private SourceFilter(Builder builder) { + this.excludeVectors = builder.excludeVectors; this.excludes = ApiTypeHelper.unmodifiable(builder.excludes); this.includes = ApiTypeHelper.unmodifiable(builder.includes); @@ -79,6 +84,21 @@ public static SourceFilter of(Function> fn) } /** + * If true, 
vector fields are excluded from the returned source. + *

    + * This option takes precedence over includes: any vector field + * will remain excluded even if it matches an includes rule. + *

    + * API name: {@code exclude_vectors} + */ + @Nullable + public final Boolean excludeVectors() { + return this.excludeVectors; + } + + /** + * A list of fields to exclude from the returned source. + *

    * API name: {@code excludes} */ public final List excludes() { @@ -86,6 +106,8 @@ public final List excludes() { } /** + * A list of fields to include in the returned source. + *

    * API name: {@code includes} */ public final List includes() { @@ -103,6 +125,11 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + if (this.excludeVectors != null) { + generator.writeKey("exclude_vectors"); + generator.write(this.excludeVectors); + + } if (ApiTypeHelper.isDefined(this.excludes)) { generator.writeKey("excludes"); generator.writeStartArray(); @@ -138,6 +165,9 @@ public String toString() { */ public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + @Nullable + private Boolean excludeVectors; + @Nullable private List excludes; @@ -145,6 +175,21 @@ public static class Builder extends WithJsonObjectBuilderBase implement private List includes; /** + * If true, vector fields are excluded from the returned source. + *

    + * This option takes precedence over includes: any vector field + * will remain excluded even if it matches an includes rule. + *

    + * API name: {@code exclude_vectors} + */ + public final Builder excludeVectors(@Nullable Boolean value) { + this.excludeVectors = value; + return this; + } + + /** + * A list of fields to exclude from the returned source. + *

    * API name: {@code excludes} *

    * Adds all elements of list to excludes. @@ -155,6 +200,8 @@ public final Builder excludes(List list) { } /** + * A list of fields to exclude from the returned source. + *

    * API name: {@code excludes} *

    * Adds one or more values to excludes. @@ -165,6 +212,8 @@ public final Builder excludes(String value, String... values) { } /** + * A list of fields to include in the returned source. + *

    * API name: {@code includes} *

    * Adds all elements of list to includes. @@ -175,6 +224,8 @@ public final Builder includes(List list) { } /** + * A list of fields to include in the returned source. + *

    * API name: {@code includes} *

    * Adds one or more values to includes. @@ -212,6 +263,7 @@ public SourceFilter build() { protected static void setupSourceFilterDeserializer(ObjectDeserializer op) { + op.add(Builder::excludeVectors, JsonpDeserializer.booleanDeserializer(), "exclude_vectors"); op.add(Builder::excludes, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "excludes", "exclude"); op.add(Builder::includes, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html b/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html index bd75df343..97b238b25 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html @@ -11,7 +11,7 @@ '_global.bulk.OperationBase': '_global/bulk/types.ts#L100-L117', '_global.bulk.OperationContainer': '_global/bulk/types.ts#L158-L180', '_global.bulk.OperationType': '_global/bulk/types.ts#L93-L98', -'_global.bulk.Request': '_global/bulk/BulkRequest.ts#L32-L247', +'_global.bulk.Request': '_global/bulk/BulkRequest.ts#L32-L251', '_global.bulk.Response': '_global/bulk/BulkResponse.ts#L24-L45', '_global.bulk.ResponseItem': '_global/bulk/types.ts#L37-L84', '_global.bulk.UpdateAction': '_global/bulk/types.ts#L182-L217', @@ -43,7 +43,7 @@ '_global.field_caps.Request': '_global/field_caps/FieldCapabilitiesRequest.ts#L25-L130', '_global.field_caps.Response': '_global/field_caps/FieldCapabilitiesResponse.ts#L24-L38', '_global.get.GetResult': '_global/get/types.ts#L25-L67', -'_global.get.Request': '_global/get/GetRequest.ts#L31-L180', +'_global.get.Request': '_global/get/GetRequest.ts#L31-L181', '_global.get.Response': '_global/get/GetResponse.ts#L23-L35', '_global.get_script.Request': '_global/get_script/GetScriptRequest.ts#L24-L56', '_global.get_script.Response': 
'_global/get_script/GetScriptResponse.ts#L23-L29', @@ -136,7 +136,7 @@ '_global.rank_eval.UnratedDocument': '_global/rank_eval/types.ts#L150-L153', '_global.reindex.Destination': '_global/reindex/types.ts#L39-L67', '_global.reindex.RemoteSource': '_global/reindex/types.ts#L112-L140', -'_global.reindex.Request': '_global/reindex/ReindexRequest.ts#L27-L309', +'_global.reindex.Request': '_global/reindex/ReindexRequest.ts#L27-L170', '_global.reindex.Response': '_global/reindex/ReindexResponse.ts#L26-L92', '_global.reindex.Source': '_global/reindex/types.ts#L69-L110', '_global.reindex_rethrottle.ReindexNode': '_global/reindex_rethrottle/types.ts#L33-L35', @@ -176,14 +176,14 @@ '_global.search._types.FetchProfileDebug': '_global/search/_types/profile.ts#L250-L253', '_global.search._types.FieldCollapse': '_global/search/_types/FieldCollapse.ts#L24-L41', '_global.search._types.FieldSuggester': '_global/search/_types/suggester.ts#L110-L143', -'_global.search._types.Highlight': '_global/search/_types/highlighting.ts#L152-L155', +'_global.search._types.Highlight': '_global/search/_types/highlighting.ts#L152-L157', '_global.search._types.HighlightBase': '_global/search/_types/highlighting.ts#L47-L150', -'_global.search._types.HighlightField': '_global/search/_types/highlighting.ts#L192-L195', -'_global.search._types.HighlighterEncoder': '_global/search/_types/highlighting.ts#L157-L160', -'_global.search._types.HighlighterFragmenter': '_global/search/_types/highlighting.ts#L162-L165', -'_global.search._types.HighlighterOrder': '_global/search/_types/highlighting.ts#L167-L169', -'_global.search._types.HighlighterTagsSchema': '_global/search/_types/highlighting.ts#L171-L173', -'_global.search._types.HighlighterType': '_global/search/_types/highlighting.ts#L175-L190', +'_global.search._types.HighlightField': '_global/search/_types/highlighting.ts#L194-L197', +'_global.search._types.HighlighterEncoder': '_global/search/_types/highlighting.ts#L159-L162', 
+'_global.search._types.HighlighterFragmenter': '_global/search/_types/highlighting.ts#L164-L167', +'_global.search._types.HighlighterOrder': '_global/search/_types/highlighting.ts#L169-L171', +'_global.search._types.HighlighterTagsSchema': '_global/search/_types/highlighting.ts#L173-L175', +'_global.search._types.HighlighterType': '_global/search/_types/highlighting.ts#L177-L192', '_global.search._types.Hit': '_global/search/_types/hits.ts#L40-L65', '_global.search._types.HitsMetadata': '_global/search/_types/hits.ts#L67-L73', '_global.search._types.InnerHits': '_global/search/_types/hits.ts#L107-L141', @@ -213,9 +213,9 @@ '_global.search._types.SearchRequestBody': '_global/search/_types/SearchRequestBody.ts#L42-L242', '_global.search._types.ShardProfile': '_global/search/_types/profile.ts#L142-L152', '_global.search._types.SmoothingModelContainer': '_global/search/_types/suggester.ts#L446-L462', -'_global.search._types.SourceConfig': '_global/search/_types/SourceFilter.ts#L33-L37', -'_global.search._types.SourceConfigParam': '_global/search/_types/SourceFilter.ts#L39-L45', -'_global.search._types.SourceFilter': '_global/search/_types/SourceFilter.ts#L23-L31', +'_global.search._types.SourceConfig': '_global/search/_types/SourceFilter.ts#L50-L54', +'_global.search._types.SourceConfigParam': '_global/search/_types/SourceFilter.ts#L56-L62', +'_global.search._types.SourceFilter': '_global/search/_types/SourceFilter.ts#L23-L48', '_global.search._types.StringDistance': '_global/search/_types/suggester.ts#L473-L494', '_global.search._types.StupidBackoffSmoothingModel': '_global/search/_types/suggester.ts#L464-L469', '_global.search._types.Suggest': '_global/search/_types/suggester.ts#L35-L41', @@ -234,17 +234,17 @@ '_global.search_mvt.Response': '_global/search_mvt/SearchMvtResponse.ts#L22-L25', '_global.search_mvt._types.GridAggregationType': '_global/search_mvt/_types/GridType.ts#L30-L33', '_global.search_mvt._types.GridType': 
'_global/search_mvt/_types/GridType.ts#L20-L28', -'_global.search_shards.Request': '_global/search_shards/SearchShardsRequest.ts#L24-L100', +'_global.search_shards.Request': '_global/search_shards/SearchShardsRequest.ts#L24-L99', '_global.search_shards.Response': '_global/search_shards/SearchShardsResponse.ts#L34-L40', '_global.search_shards.SearchShardsNodeAttributes': '_global/search_shards/SearchShardsResponse.ts#L42-L60', '_global.search_shards.ShardStoreIndex': '_global/search_shards/SearchShardsResponse.ts#L62-L65', -'_global.search_template.Request': '_global/search_template/SearchTemplateRequest.ts#L33-L154', +'_global.search_template.Request': '_global/search_template/SearchTemplateRequest.ts#L33-L153', '_global.search_template.Response': '_global/search_template/SearchTemplateResponse.ts#L30-L48', '_global.terms_enum.Request': '_global/terms_enum/TermsEnumRequest.ts#L26-L93', '_global.terms_enum.Response': '_global/terms_enum/TermsEnumResponse.ts#L22-L32', '_global.termvectors.FieldStatistics': '_global/termvectors/types.ts#L28-L32', '_global.termvectors.Filter': '_global/termvectors/types.ts#L49-L86', -'_global.termvectors.Request': '_global/termvectors/TermVectorsRequest.ts#L33-L239', +'_global.termvectors.Request': '_global/termvectors/TermVectorsRequest.ts#L33-L241', '_global.termvectors.Response': '_global/termvectors/TermVectorsResponse.ts#L25-L34', '_global.termvectors.Term': '_global/termvectors/types.ts#L34-L40', '_global.termvectors.TermVector': '_global/termvectors/types.ts#L23-L26', @@ -252,7 +252,7 @@ '_global.update.Request': '_global/update/UpdateRequest.ts#L38-L194', '_global.update.Response': '_global/update/UpdateResponse.ts#L27-L30', '_global.update.UpdateWriteResponseBase': '_global/update/UpdateResponse.ts#L23-L25', -'_global.update_by_query.Request': '_global/update_by_query/UpdateByQueryRequest.ts#L37-L340', +'_global.update_by_query.Request': '_global/update_by_query/UpdateByQueryRequest.ts#L37-L339', 
'_global.update_by_query.Response': '_global/update_by_query/UpdateByQueryResponse.ts#L26-L67', '_global.update_by_query_rethrottle.Request': '_global/update_by_query_rethrottle/UpdateByQueryRethrottleRequest.ts#L24-L56', '_global.update_by_query_rethrottle.Response': '_global/update_by_query_rethrottle/UpdateByQueryRethrottleResponse.ts#L23-L25', @@ -302,12 +302,14 @@ '_types.IndicesOptions': '_types/common.ts#L332-L359', '_types.IndicesResponseBase': '_types/Base.ts#L138-L140', '_types.InlineGet': '_types/common.ts#L317-L330', +'_types.InnerRetriever': '_types/Retriever.ts#L82-L86', '_types.KnnQuery': '_types/Knn.ts#L64-L87', -'_types.KnnRetriever': '_types/Retriever.ts#L64-L82', +'_types.KnnRetriever': '_types/Retriever.ts#L112-L130', '_types.KnnSearch': '_types/Knn.ts#L35-L62', '_types.LatLonGeoLocation': '_types/Geo.ts#L120-L129', '_types.Level': '_types/common.ts#L246-L250', '_types.LifecycleOperationMode': '_types/Lifecycle.ts#L20-L24', +'_types.LinearRetriever': '_types/Retriever.ts#L68-L72', '_types.MergesStats': '_types/Stats.ts#L166-L183', '_types.NestedSortValue': '_types/sort.ts#L29-L34', '_types.NodeAttributes': '_types/Node.ts#L41-L52', @@ -316,10 +318,11 @@ '_types.NodeStatistics': '_types/Node.ts#L28-L39', '_types.Normalization': '_types/Similarity.ts#L52-L58', '_types.OpType': '_types/common.ts#L252-L261', +'_types.PinnedRetriever': '_types/Retriever.ts#L74-L80', '_types.PluginStats': '_types/Stats.ts#L185-L195', '_types.QueryCacheStats': '_types/Stats.ts#L197-L231', '_types.QueryVectorBuilder': '_types/Knn.ts#L89-L92', -'_types.RRFRetriever': '_types/Retriever.ts#L84-L91', +'_types.RRFRetriever': '_types/Retriever.ts#L132-L139', '_types.RankBase': '_types/Rank.ts#L30-L30', '_types.RankContainer': '_types/Rank.ts#L22-L28', '_types.RecoveryStats': '_types/Stats.ts#L233-L238', @@ -329,12 +332,14 @@ '_types.RequestBase': '_types/Base.ts#L34-L34', '_types.RequestCacheStats': '_types/Stats.ts#L249-L255', '_types.RescoreVector': '_types/Knn.ts#L30-L33', 
+'_types.RescorerRetriever': '_types/Retriever.ts#L62-L66', '_types.Result': '_types/Result.ts#L20-L26', '_types.Retries': '_types/Retries.ts#L22-L31', -'_types.RetrieverBase': '_types/Retriever.ts#L44-L49', -'_types.RetrieverContainer': '_types/Retriever.ts#L28-L42', +'_types.RetrieverBase': '_types/Retriever.ts#L53-L60', +'_types.RetrieverContainer': '_types/Retriever.ts#L28-L51', '_types.RrfRank': '_types/Rank.ts#L32-L37', -'_types.RuleRetriever': '_types/Retriever.ts#L106-L115', +'_types.RuleRetriever': '_types/Retriever.ts#L154-L163', +'_types.ScoreNormalizer': '_types/Retriever.ts#L88-L92', '_types.ScoreSort': '_types/sort.ts#L54-L56', '_types.Script': '_types/Scripting.ts#L65-L89', '_types.ScriptField': '_types/Scripting.ts#L91-L94', @@ -356,13 +361,14 @@ '_types.SortMode': '_types/sort.ts#L108-L117', '_types.SortOptions': '_types/sort.ts#L86-L96', '_types.SortOrder': '_types/sort.ts#L119-L128', -'_types.StandardRetriever': '_types/Retriever.ts#L51-L62', +'_types.SpecifiedDocument': '_types/Retriever.ts#L94-L97', +'_types.StandardRetriever': '_types/Retriever.ts#L99-L110', '_types.StoreStats': '_types/Stats.ts#L373-L400', '_types.StoredScript': '_types/Scripting.ts#L51-L63', '_types.SuggestMode': '_types/common.ts#L279-L292', '_types.TaskFailure': '_types/Errors.ts#L67-L72', '_types.TextEmbedding': '_types/Knn.ts#L94-L97', -'_types.TextSimilarityReranker': '_types/Retriever.ts#L93-L104', +'_types.TextSimilarityReranker': '_types/Retriever.ts#L141-L152', '_types.ThreadType': '_types/common.ts#L294-L300', '_types.TimeUnit': '_types/Time.ts#L69-L77', '_types.TopLeftBottomRightGeoBounds': '_types/Geo.ts#L161-L164', @@ -391,7 +397,7 @@ '_types.aggregations.AverageBucketAggregation': '_types/aggregations/pipeline.ts#L78-L81', '_types.aggregations.AvgAggregate': '_types/aggregations/Aggregate.ts#L218-L222', '_types.aggregations.BoxPlotAggregate': '_types/aggregations/Aggregate.ts#L806-L825', -'_types.aggregations.BoxplotAggregation': 
'_types/aggregations/metric.ts#L57-L62', +'_types.aggregations.BoxplotAggregation': '_types/aggregations/metric.ts#L57-L68', '_types.aggregations.BucketAggregationBase': '_types/aggregations/bucket.ts#L53-L58', '_types.aggregations.BucketCorrelationAggregation': '_types/aggregations/pipeline.ts#L139-L146', '_types.aggregations.BucketCorrelationFunction': '_types/aggregations/pipeline.ts#L148-L153', @@ -407,8 +413,8 @@ '_types.aggregations.BucketsPath': '_types/aggregations/pipeline.ts#L53-L59', '_types.aggregations.CalendarInterval': '_types/aggregations/bucket.ts#L262-L279', '_types.aggregations.CardinalityAggregate': '_types/aggregations/Aggregate.ts#L140-L143', -'_types.aggregations.CardinalityAggregation': '_types/aggregations/metric.ts#L87-L99', -'_types.aggregations.CardinalityExecutionMode': '_types/aggregations/metric.ts#L64-L85', +'_types.aggregations.CardinalityAggregation': '_types/aggregations/metric.ts#L93-L105', +'_types.aggregations.CardinalityExecutionMode': '_types/aggregations/metric.ts#L70-L91', '_types.aggregations.CategorizeTextAggregation': '_types/aggregations/bucket.ts#L1117-L1183', '_types.aggregations.CategorizeTextAnalyzer': '_types/aggregations/bucket.ts#L1185-L1188', '_types.aggregations.ChiSquareHeuristic': '_types/aggregations/bucket.ts#L782-L791', @@ -442,7 +448,7 @@ '_types.aggregations.EwmaMovingAverageAggregation': '_types/aggregations/pipeline.ts#L278-L281', '_types.aggregations.ExtendedBounds': '_types/aggregations/bucket.ts#L508-L517', '_types.aggregations.ExtendedStatsAggregate': '_types/aggregations/Aggregate.ts#L299-L320', -'_types.aggregations.ExtendedStatsAggregation': '_types/aggregations/metric.ts#L101-L106', +'_types.aggregations.ExtendedStatsAggregation': '_types/aggregations/metric.ts#L107-L112', '_types.aggregations.ExtendedStatsBucketAggregate': '_types/aggregations/Aggregate.ts#L322-L323', '_types.aggregations.ExtendedStatsBucketAggregation': '_types/aggregations/pipeline.ts#L218-L223', 
'_types.aggregations.FieldDateMath': '_types/aggregations/bucket.ts#L309-L316', @@ -458,9 +464,9 @@ '_types.aggregations.FrequentItemSetsField': '_types/aggregations/bucket.ts#L1227-L1239', '_types.aggregations.GapPolicy': '_types/aggregations/pipeline.ts#L61-L76', '_types.aggregations.GeoBoundsAggregate': '_types/aggregations/Aggregate.ts#L327-L333', -'_types.aggregations.GeoBoundsAggregation': '_types/aggregations/metric.ts#L108-L117', +'_types.aggregations.GeoBoundsAggregation': '_types/aggregations/metric.ts#L114-L123', '_types.aggregations.GeoCentroidAggregate': '_types/aggregations/Aggregate.ts#L335-L342', -'_types.aggregations.GeoCentroidAggregation': '_types/aggregations/metric.ts#L119-L122', +'_types.aggregations.GeoCentroidAggregation': '_types/aggregations/metric.ts#L125-L128', '_types.aggregations.GeoDistanceAggregate': '_types/aggregations/Aggregate.ts#L617-L622', '_types.aggregations.GeoDistanceAggregation': '_types/aggregations/bucket.ts#L396-L419', '_types.aggregations.GeoHashGridAggregate': '_types/aggregations/Aggregate.ts#L566-L568', @@ -469,9 +475,9 @@ '_types.aggregations.GeoHexGridAggregate': '_types/aggregations/Aggregate.ts#L585-L586', '_types.aggregations.GeoHexGridBucket': '_types/aggregations/Aggregate.ts#L588-L590', '_types.aggregations.GeoLineAggregate': '_types/aggregations/Aggregate.ts#L902-L912', -'_types.aggregations.GeoLineAggregation': '_types/aggregations/metric.ts#L124-L149', -'_types.aggregations.GeoLinePoint': '_types/aggregations/metric.ts#L158-L163', -'_types.aggregations.GeoLineSort': '_types/aggregations/metric.ts#L151-L156', +'_types.aggregations.GeoLineAggregation': '_types/aggregations/metric.ts#L130-L155', +'_types.aggregations.GeoLinePoint': '_types/aggregations/metric.ts#L164-L169', +'_types.aggregations.GeoLineSort': '_types/aggregations/metric.ts#L157-L162', '_types.aggregations.GeoTileGridAggregate': '_types/aggregations/Aggregate.ts#L574-L579', '_types.aggregations.GeoTileGridAggregation': 
'_types/aggregations/bucket.ts#L451-L477', '_types.aggregations.GeoTileGridBucket': '_types/aggregations/Aggregate.ts#L581-L583', @@ -479,7 +485,7 @@ '_types.aggregations.GlobalAggregate': '_types/aggregations/Aggregate.ts#L546-L550', '_types.aggregations.GlobalAggregation': '_types/aggregations/bucket.ts#L506-L506', '_types.aggregations.GoogleNormalizedDistanceHeuristic': '_types/aggregations/bucket.ts#L793-L798', -'_types.aggregations.HdrMethod': '_types/aggregations/metric.ts#L225-L230', +'_types.aggregations.HdrMethod': '_types/aggregations/metric.ts#L237-L242', '_types.aggregations.HdrPercentileRanksAggregate': '_types/aggregations/Aggregate.ts#L171-L172', '_types.aggregations.HdrPercentilesAggregate': '_types/aggregations/Aggregate.ts#L168-L169', '_types.aggregations.HistogramAggregate': '_types/aggregations/Aggregate.ts#L372-L376', @@ -513,13 +519,13 @@ '_types.aggregations.MatrixStatsAggregation': '_types/aggregations/matrix.ts#L38-L44', '_types.aggregations.MatrixStatsFields': '_types/aggregations/Aggregate.ts#L875-L884', '_types.aggregations.MaxAggregate': '_types/aggregations/Aggregate.ts#L205-L209', -'_types.aggregations.MaxAggregation': '_types/aggregations/metric.ts#L165-L165', +'_types.aggregations.MaxAggregation': '_types/aggregations/metric.ts#L171-L171', '_types.aggregations.MaxBucketAggregation': '_types/aggregations/pipeline.ts#L244-L247', '_types.aggregations.MedianAbsoluteDeviationAggregate': '_types/aggregations/Aggregate.ts#L196-L197', -'_types.aggregations.MedianAbsoluteDeviationAggregation': '_types/aggregations/metric.ts#L167-L176', +'_types.aggregations.MedianAbsoluteDeviationAggregation': '_types/aggregations/metric.ts#L173-L188', '_types.aggregations.MetricAggregationBase': '_types/aggregations/metric.ts#L34-L45', '_types.aggregations.MinAggregate': '_types/aggregations/Aggregate.ts#L199-L203', -'_types.aggregations.MinAggregation': '_types/aggregations/metric.ts#L178-L178', +'_types.aggregations.MinAggregation': 
'_types/aggregations/metric.ts#L190-L190', '_types.aggregations.MinBucketAggregation': '_types/aggregations/pipeline.ts#L249-L252', '_types.aggregations.MinimumInterval': '_types/aggregations/bucket.ts#L112-L119', '_types.aggregations.MissingAggregate': '_types/aggregations/Aggregate.ts#L528-L532', @@ -543,10 +549,10 @@ '_types.aggregations.ParentAggregate': '_types/aggregations/Aggregate.ts#L894-L898', '_types.aggregations.ParentAggregation': '_types/aggregations/bucket.ts#L662-L667', '_types.aggregations.PercentageScoreHeuristic': '_types/aggregations/bucket.ts#L811-L811', -'_types.aggregations.PercentileRanksAggregation': '_types/aggregations/metric.ts#L180-L202', +'_types.aggregations.PercentileRanksAggregation': '_types/aggregations/metric.ts#L192-L214', '_types.aggregations.Percentiles': '_types/aggregations/Aggregate.ts#L152-L153', '_types.aggregations.PercentilesAggregateBase': '_types/aggregations/Aggregate.ts#L148-L150', -'_types.aggregations.PercentilesAggregation': '_types/aggregations/metric.ts#L204-L223', +'_types.aggregations.PercentilesAggregation': '_types/aggregations/metric.ts#L216-L235', '_types.aggregations.PercentilesBucketAggregate': '_types/aggregations/Aggregate.ts#L180-L181', '_types.aggregations.PercentilesBucketAggregation': '_types/aggregations/pipeline.ts#L389-L397', '_types.aggregations.PipelineAggregationBase': '_types/aggregations/pipeline.ts#L39-L51', @@ -556,8 +562,8 @@ '_types.aggregations.RangeBucket': '_types/aggregations/Aggregate.ts#L600-L607', '_types.aggregations.RareTermsAggregation': '_types/aggregations/bucket.ts#L706-L739', '_types.aggregations.RateAggregate': '_types/aggregations/Aggregate.ts#L847-L854', -'_types.aggregations.RateAggregation': '_types/aggregations/metric.ts#L239-L250', -'_types.aggregations.RateMode': '_types/aggregations/metric.ts#L252-L261', +'_types.aggregations.RateAggregation': '_types/aggregations/metric.ts#L262-L273', +'_types.aggregations.RateMode': '_types/aggregations/metric.ts#L275-L284', 
'_types.aggregations.ReverseNestedAggregate': '_types/aggregations/Aggregate.ts#L540-L544', '_types.aggregations.ReverseNestedAggregation': '_types/aggregations/bucket.ts#L741-L747', '_types.aggregations.SamplerAggregate': '_types/aggregations/Aggregate.ts#L558-L559', @@ -565,7 +571,7 @@ '_types.aggregations.SamplerAggregationExecutionHint': '_types/aggregations/bucket.ts#L359-L372', '_types.aggregations.ScriptedHeuristic': '_types/aggregations/bucket.ts#L813-L815', '_types.aggregations.ScriptedMetricAggregate': '_types/aggregations/Aggregate.ts#L739-L745', -'_types.aggregations.ScriptedMetricAggregation': '_types/aggregations/metric.ts#L263-L289', +'_types.aggregations.ScriptedMetricAggregation': '_types/aggregations/metric.ts#L286-L312', '_types.aggregations.SerialDifferencingAggregation': '_types/aggregations/pipeline.ts#L399-L408', '_types.aggregations.SignificantLongTermsAggregate': '_types/aggregations/Aggregate.ts#L668-L670', '_types.aggregations.SignificantLongTermsBucket': '_types/aggregations/Aggregate.ts#L677-L680', @@ -582,24 +588,25 @@ '_types.aggregations.StandardDeviationBounds': '_types/aggregations/Aggregate.ts#L281-L288', '_types.aggregations.StandardDeviationBoundsAsString': '_types/aggregations/Aggregate.ts#L290-L297', '_types.aggregations.StatsAggregate': '_types/aggregations/Aggregate.ts#L257-L273', -'_types.aggregations.StatsAggregation': '_types/aggregations/metric.ts#L291-L291', +'_types.aggregations.StatsAggregation': '_types/aggregations/metric.ts#L314-L314', '_types.aggregations.StatsBucketAggregate': '_types/aggregations/Aggregate.ts#L275-L279', '_types.aggregations.StatsBucketAggregation': '_types/aggregations/pipeline.ts#L410-L410', '_types.aggregations.StringRareTermsAggregate': '_types/aggregations/Aggregate.ts#L483-L487', '_types.aggregations.StringRareTermsBucket': '_types/aggregations/Aggregate.ts#L489-L491', '_types.aggregations.StringStatsAggregate': '_types/aggregations/Aggregate.ts#L793-L804', 
-'_types.aggregations.StringStatsAggregation': '_types/aggregations/metric.ts#L293-L299', +'_types.aggregations.StringStatsAggregation': '_types/aggregations/metric.ts#L316-L322', '_types.aggregations.StringTermsAggregate': '_types/aggregations/Aggregate.ts#L424-L429', '_types.aggregations.StringTermsBucket': '_types/aggregations/Aggregate.ts#L435-L437', '_types.aggregations.SumAggregate': '_types/aggregations/Aggregate.ts#L211-L216', -'_types.aggregations.SumAggregation': '_types/aggregations/metric.ts#L301-L301', +'_types.aggregations.SumAggregation': '_types/aggregations/metric.ts#L324-L324', '_types.aggregations.SumBucketAggregation': '_types/aggregations/pipeline.ts#L412-L415', -'_types.aggregations.TDigest': '_types/aggregations/metric.ts#L232-L237', +'_types.aggregations.TDigest': '_types/aggregations/metric.ts#L244-L255', +'_types.aggregations.TDigestExecutionHint': '_types/aggregations/metric.ts#L257-L260', '_types.aggregations.TDigestPercentileRanksAggregate': '_types/aggregations/Aggregate.ts#L177-L178', '_types.aggregations.TDigestPercentilesAggregate': '_types/aggregations/Aggregate.ts#L174-L175', '_types.aggregations.TTestAggregate': '_types/aggregations/Aggregate.ts#L838-L845', -'_types.aggregations.TTestAggregation': '_types/aggregations/metric.ts#L303-L317', -'_types.aggregations.TTestType': '_types/aggregations/metric.ts#L331-L344', +'_types.aggregations.TTestAggregation': '_types/aggregations/metric.ts#L326-L340', +'_types.aggregations.TTestType': '_types/aggregations/metric.ts#L354-L367', '_types.aggregations.TermsAggregateBase': '_types/aggregations/Aggregate.ts#L417-L422', '_types.aggregations.TermsAggregation': '_types/aggregations/bucket.ts#L963-L1031', '_types.aggregations.TermsAggregationCollectMode': '_types/aggregations/bucket.ts#L1056-L1065', @@ -608,28 +615,28 @@ '_types.aggregations.TermsExclude': '_types/aggregations/bucket.ts#L1077-L1078', '_types.aggregations.TermsInclude': '_types/aggregations/bucket.ts#L1074-L1075', 
'_types.aggregations.TermsPartition': '_types/aggregations/bucket.ts#L1080-L1089', -'_types.aggregations.TestPopulation': '_types/aggregations/metric.ts#L319-L329', +'_types.aggregations.TestPopulation': '_types/aggregations/metric.ts#L342-L352', '_types.aggregations.TimeSeriesAggregate': '_types/aggregations/Aggregate.ts#L730-L731', '_types.aggregations.TimeSeriesAggregation': '_types/aggregations/bucket.ts#L1033-L1046', '_types.aggregations.TimeSeriesBucket': '_types/aggregations/Aggregate.ts#L733-L735', '_types.aggregations.TopHitsAggregate': '_types/aggregations/Aggregate.ts#L747-L753', -'_types.aggregations.TopHitsAggregation': '_types/aggregations/metric.ts#L346-L406', +'_types.aggregations.TopHitsAggregation': '_types/aggregations/metric.ts#L369-L429', '_types.aggregations.TopMetrics': '_types/aggregations/Aggregate.ts#L832-L836', '_types.aggregations.TopMetricsAggregate': '_types/aggregations/Aggregate.ts#L827-L830', -'_types.aggregations.TopMetricsAggregation': '_types/aggregations/metric.ts#L408-L425', -'_types.aggregations.TopMetricsValue': '_types/aggregations/metric.ts#L427-L432', +'_types.aggregations.TopMetricsAggregation': '_types/aggregations/metric.ts#L431-L448', +'_types.aggregations.TopMetricsValue': '_types/aggregations/metric.ts#L450-L455', '_types.aggregations.UnmappedRareTermsAggregate': '_types/aggregations/Aggregate.ts#L493-L499', '_types.aggregations.UnmappedSamplerAggregate': '_types/aggregations/Aggregate.ts#L561-L562', '_types.aggregations.UnmappedSignificantTermsAggregate': '_types/aggregations/Aggregate.ts#L690-L696', '_types.aggregations.UnmappedTermsAggregate': '_types/aggregations/Aggregate.ts#L463-L469', '_types.aggregations.ValueCountAggregate': '_types/aggregations/Aggregate.ts#L231-L236', -'_types.aggregations.ValueCountAggregation': '_types/aggregations/metric.ts#L434-L434', -'_types.aggregations.ValueType': '_types/aggregations/metric.ts#L436-L447', +'_types.aggregations.ValueCountAggregation': 
'_types/aggregations/metric.ts#L457-L457', +'_types.aggregations.ValueType': '_types/aggregations/metric.ts#L459-L470', '_types.aggregations.VariableWidthHistogramAggregate': '_types/aggregations/Aggregate.ts#L402-L404', '_types.aggregations.VariableWidthHistogramAggregation': '_types/aggregations/bucket.ts#L1091-L1115', '_types.aggregations.VariableWidthHistogramBucket': '_types/aggregations/Aggregate.ts#L406-L413', -'_types.aggregations.WeightedAverageAggregation': '_types/aggregations/metric.ts#L449-L463', -'_types.aggregations.WeightedAverageValue': '_types/aggregations/metric.ts#L465-L475', +'_types.aggregations.WeightedAverageAggregation': '_types/aggregations/metric.ts#L472-L486', +'_types.aggregations.WeightedAverageValue': '_types/aggregations/metric.ts#L488-L498', '_types.aggregations.WeightedAvgAggregate': '_types/aggregations/Aggregate.ts#L224-L229', '_types.analysis.Analyzer': '_types/analysis/analyzers.ts#L427-L480', '_types.analysis.ApostropheTokenFilter': '_types/analysis/token_filters.ts#L450-L452', @@ -939,23 +946,25 @@ '_types.query_dsl.IdsQuery': '_types/query_dsl/term.ts#L88-L96', '_types.query_dsl.IntervalsAllOf': '_types/query_dsl/fulltext.ts#L50-L70', '_types.query_dsl.IntervalsAnyOf': '_types/query_dsl/fulltext.ts#L72-L81', -'_types.query_dsl.IntervalsContainer': '_types/query_dsl/fulltext.ts#L83-L110', -'_types.query_dsl.IntervalsFilter': '_types/query_dsl/fulltext.ts#L112-L152', -'_types.query_dsl.IntervalsFuzzy': '_types/query_dsl/fulltext.ts#L154-L184', -'_types.query_dsl.IntervalsMatch': '_types/query_dsl/fulltext.ts#L186-L216', -'_types.query_dsl.IntervalsPrefix': '_types/query_dsl/fulltext.ts#L218-L233', -'_types.query_dsl.IntervalsQuery': '_types/query_dsl/fulltext.ts#L235-L266', -'_types.query_dsl.IntervalsWildcard': '_types/query_dsl/fulltext.ts#L268-L283', +'_types.query_dsl.IntervalsContainer': '_types/query_dsl/fulltext.ts#L83-L112', +'_types.query_dsl.IntervalsFilter': '_types/query_dsl/fulltext.ts#L114-L154', 
+'_types.query_dsl.IntervalsFuzzy': '_types/query_dsl/fulltext.ts#L156-L186', +'_types.query_dsl.IntervalsMatch': '_types/query_dsl/fulltext.ts#L188-L218', +'_types.query_dsl.IntervalsPrefix': '_types/query_dsl/fulltext.ts#L220-L235', +'_types.query_dsl.IntervalsQuery': '_types/query_dsl/fulltext.ts#L283-L317', +'_types.query_dsl.IntervalsRange': '_types/query_dsl/fulltext.ts#L237-L264', +'_types.query_dsl.IntervalsRegexp': '_types/query_dsl/fulltext.ts#L266-L281', +'_types.query_dsl.IntervalsWildcard': '_types/query_dsl/fulltext.ts#L319-L334', '_types.query_dsl.Like': '_types/query_dsl/specialized.ts#L198-L203', '_types.query_dsl.LikeDocument': '_types/query_dsl/specialized.ts#L174-L196', '_types.query_dsl.MatchAllQuery': '_types/query_dsl/MatchAllQuery.ts#L22-L25', -'_types.query_dsl.MatchBoolPrefixQuery': '_types/query_dsl/fulltext.ts#L355-L412', +'_types.query_dsl.MatchBoolPrefixQuery': '_types/query_dsl/fulltext.ts#L406-L463', '_types.query_dsl.MatchNoneQuery': '_types/query_dsl/MatchNoneQuery.ts#L22-L25', -'_types.query_dsl.MatchPhrasePrefixQuery': '_types/query_dsl/fulltext.ts#L440-L469', -'_types.query_dsl.MatchPhraseQuery': '_types/query_dsl/fulltext.ts#L414-L438', -'_types.query_dsl.MatchQuery': '_types/query_dsl/fulltext.ts#L285-L353', +'_types.query_dsl.MatchPhrasePrefixQuery': '_types/query_dsl/fulltext.ts#L491-L520', +'_types.query_dsl.MatchPhraseQuery': '_types/query_dsl/fulltext.ts#L465-L489', +'_types.query_dsl.MatchQuery': '_types/query_dsl/fulltext.ts#L336-L404', '_types.query_dsl.MoreLikeThisQuery': '_types/query_dsl/specialized.ts#L87-L172', -'_types.query_dsl.MultiMatchQuery': '_types/query_dsl/fulltext.ts#L471-L557', +'_types.query_dsl.MultiMatchQuery': '_types/query_dsl/fulltext.ts#L522-L608', '_types.query_dsl.MultiValueMode': '_types/query_dsl/compound.ts#L368-L385', '_types.query_dsl.NestedQuery': '_types/query_dsl/joining.ts#L112-L139', '_types.query_dsl.NumberRangeQuery': '_types/query_dsl/term.ts#L168-L168', @@ -968,7 +977,7 @@ 
'_types.query_dsl.PrefixQuery': '_types/query_dsl/term.ts#L98-L120', '_types.query_dsl.QueryBase': '_types/query_dsl/abstractions.ts#L459-L470', '_types.query_dsl.QueryContainer': '_types/query_dsl/abstractions.ts#L103-L434', -'_types.query_dsl.QueryStringQuery': '_types/query_dsl/fulltext.ts#L598-L721', +'_types.query_dsl.QueryStringQuery': '_types/query_dsl/fulltext.ts#L649-L772', '_types.query_dsl.RandomScoreFunction': '_types/query_dsl/compound.ts#L144-L147', '_types.query_dsl.RangeQuery': '_types/query_dsl/term.ts#L172-L182', '_types.query_dsl.RangeQueryBase': '_types/query_dsl/term.ts#L122-L144', @@ -980,15 +989,15 @@ '_types.query_dsl.RankFeatureFunctionSigmoid': '_types/query_dsl/specialized.ts#L298-L307', '_types.query_dsl.RankFeatureQuery': '_types/query_dsl/specialized.ts#L309-L335', '_types.query_dsl.RegexpQuery': '_types/query_dsl/term.ts#L199-L232', -'_types.query_dsl.RuleQuery': '_types/query_dsl/specialized.ts#L398-L405', +'_types.query_dsl.RuleQuery': '_types/query_dsl/specialized.ts#L398-L406', '_types.query_dsl.ScriptQuery': '_types/query_dsl/specialized.ts#L337-L346', '_types.query_dsl.ScriptScoreFunction': '_types/query_dsl/compound.ts#L137-L142', '_types.query_dsl.ScriptScoreQuery': '_types/query_dsl/specialized.ts#L348-L365', '_types.query_dsl.SemanticQuery': '_types/query_dsl/SemanticQuery.ts#L22-L30', '_types.query_dsl.ShapeFieldQuery': '_types/query_dsl/specialized.ts#L383-L396', '_types.query_dsl.ShapeQuery': '_types/query_dsl/specialized.ts#L367-L381', -'_types.query_dsl.SimpleQueryStringFlag': '_types/query_dsl/fulltext.ts#L729-L784', -'_types.query_dsl.SimpleQueryStringQuery': '_types/query_dsl/fulltext.ts#L786-L854', +'_types.query_dsl.SimpleQueryStringFlag': '_types/query_dsl/fulltext.ts#L780-L835', +'_types.query_dsl.SimpleQueryStringQuery': '_types/query_dsl/fulltext.ts#L837-L905', '_types.query_dsl.SpanContainingQuery': '_types/query_dsl/span.ts#L25-L39', '_types.query_dsl.SpanFieldMaskingQuery': 
'_types/query_dsl/span.ts#L41-L47', '_types.query_dsl.SpanFirstQuery': '_types/query_dsl/span.ts#L49-L61', @@ -1007,7 +1016,7 @@ '_types.query_dsl.TermsQueryField': '_types/query_dsl/term.ts#L261-L264', '_types.query_dsl.TermsSetQuery': '_types/query_dsl/term.ts#L273-L295', '_types.query_dsl.TextExpansionQuery': '_types/query_dsl/TextExpansionQuery.ts#L23-L36', -'_types.query_dsl.TextQueryType': '_types/query_dsl/fulltext.ts#L559-L585', +'_types.query_dsl.TextQueryType': '_types/query_dsl/fulltext.ts#L610-L636', '_types.query_dsl.TokenPruningConfig': '_types/query_dsl/TokenPruningConfig.ts#L22-L35', '_types.query_dsl.TypeQuery': '_types/query_dsl/term.ts#L297-L299', '_types.query_dsl.UntypedDecayFunction': '_types/query_dsl/compound.ts#L204-L207', @@ -1016,7 +1025,7 @@ '_types.query_dsl.WeightedTokensQuery': '_types/query_dsl/WeightedTokensQuery.ts#L25-L33', '_types.query_dsl.WildcardQuery': '_types/query_dsl/term.ts#L301-L321', '_types.query_dsl.WrapperQuery': '_types/query_dsl/abstractions.ts#L508-L517', -'_types.query_dsl.ZeroTermsQuery': '_types/query_dsl/fulltext.ts#L587-L596', +'_types.query_dsl.ZeroTermsQuery': '_types/query_dsl/fulltext.ts#L638-L647', 'async_search._types.AsyncSearch': 'async_search/_types/AsyncSearch.ts#L30-L56', 'async_search._types.AsyncSearchDocumentResponseBase': 'async_search/_types/AsyncSearchResponseBase.ts#L52-L56', 'async_search._types.AsyncSearchResponseBase': 'async_search/_types/AsyncSearchResponseBase.ts#L24-L51', @@ -1164,9 +1173,9 @@ 'ccr.stats.Response': 'ccr/stats/CcrStatsResponse.ts#L22-L29', 'ccr.unfollow.Request': 'ccr/unfollow/UnfollowIndexRequest.ts#L24-L59', 'ccr.unfollow.Response': 'ccr/unfollow/UnfollowIndexResponse.ts#L22-L25', -'cluster._types.ComponentTemplate': 'cluster/_types/ComponentTemplate.ts#L27-L30', -'cluster._types.ComponentTemplateNode': 'cluster/_types/ComponentTemplate.ts#L32-L41', -'cluster._types.ComponentTemplateSummary': 'cluster/_types/ComponentTemplate.ts#L43-L55', 
+'cluster._types.ComponentTemplate': 'cluster/_types/ComponentTemplate.ts#L28-L31', +'cluster._types.ComponentTemplateNode': 'cluster/_types/ComponentTemplate.ts#L33-L42', +'cluster._types.ComponentTemplateSummary': 'cluster/_types/ComponentTemplate.ts#L44-L61', 'cluster.allocation_explain.AllocationDecision': 'cluster/allocation_explain/types.ts#L27-L31', 'cluster.allocation_explain.AllocationExplainDecision': 'cluster/allocation_explain/types.ts#L33-L38', 'cluster.allocation_explain.AllocationStore': 'cluster/allocation_explain/types.ts#L40-L47', @@ -1176,7 +1185,7 @@ 'cluster.allocation_explain.DiskUsage': 'cluster/allocation_explain/types.ts#L63-L70', 'cluster.allocation_explain.NodeAllocationExplanation': 'cluster/allocation_explain/types.ts#L103-L117', 'cluster.allocation_explain.NodeDiskUsage': 'cluster/allocation_explain/types.ts#L57-L61', -'cluster.allocation_explain.Request': 'cluster/allocation_explain/ClusterAllocationExplainRequest.ts#L25-L79', +'cluster.allocation_explain.Request': 'cluster/allocation_explain/ClusterAllocationExplainRequest.ts#L25-L81', 'cluster.allocation_explain.ReservedSize': 'cluster/allocation_explain/types.ts#L72-L77', 'cluster.allocation_explain.Response': 'cluster/allocation_explain/ClusterAllocationExplainResponse.ts#L32-L64', 'cluster.allocation_explain.UnassignedInformation': 'cluster/allocation_explain/types.ts#L128-L136', @@ -1187,8 +1196,8 @@ 'cluster.exists_component_template.Request': 'cluster/exists_component_template/ClusterComponentTemplateExistsRequest.ts#L24-L62', 'cluster.get_component_template.Request': 'cluster/get_component_template/ClusterGetComponentTemplateRequest.ts#L24-L77', 'cluster.get_component_template.Response': 'cluster/get_component_template/ClusterGetComponentTemplateResponse.ts#L22-L24', -'cluster.get_settings.Request': 'cluster/get_settings/ClusterGetSettingsRequest.ts#L23-L63', -'cluster.get_settings.Response': 'cluster/get_settings/ClusterGetSettingsResponse.ts#L23-L29', 
+'cluster.get_settings.Request': 'cluster/get_settings/ClusterGetSettingsRequest.ts#L23-L65', +'cluster.get_settings.Response': 'cluster/get_settings/ClusterGetSettingsResponse.ts#L23-L32', 'cluster.health.HealthResponseBody': 'cluster/health/ClusterHealthResponse.ts#L40-L77', 'cluster.health.IndexHealthStats': 'cluster/health/types.ts#L24-L35', 'cluster.health.Request': 'cluster/health/ClusterHealthRequest.ts#L32-L118', @@ -1202,7 +1211,7 @@ 'cluster.post_voting_config_exclusions.Request': 'cluster/post_voting_config_exclusions/ClusterPostVotingConfigExclusionsRequest.ts#L24-L80', 'cluster.put_component_template.Request': 'cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L25-L105', 'cluster.put_component_template.Response': 'cluster/put_component_template/ClusterPutComponentTemplateResponse.ts#L22-L25', -'cluster.put_settings.Request': 'cluster/put_settings/ClusterPutSettingsRequest.ts#L25-L68', +'cluster.put_settings.Request': 'cluster/put_settings/ClusterPutSettingsRequest.ts#L25-L71', 'cluster.put_settings.Response': 'cluster/put_settings/ClusterPutSettingsResponse.ts#L23-L29', 'cluster.remote_info.ClusterRemoteInfo': 'cluster/remote_info/ClusterRemoteInfoResponse.ts#L29-L30', 'cluster.remote_info.ClusterRemoteProxyInfo': 'cluster/remote_info/ClusterRemoteInfoResponse.ts#L58-L83', @@ -1431,10 +1440,10 @@ 'ilm._types.WaitForSnapshotAction': 'ilm/_types/Phase.ts#L145-L147', 'ilm.delete_lifecycle.Request': 'ilm/delete_lifecycle/DeleteLifecycleRequest.ts#L24-L58', 'ilm.delete_lifecycle.Response': 'ilm/delete_lifecycle/DeleteLifecycleResponse.ts#L22-L25', -'ilm.explain_lifecycle.LifecycleExplain': 'ilm/explain_lifecycle/types.ts#L64-L67', -'ilm.explain_lifecycle.LifecycleExplainManaged': 'ilm/explain_lifecycle/types.ts#L27-L57', -'ilm.explain_lifecycle.LifecycleExplainPhaseExecution': 'ilm/explain_lifecycle/types.ts#L69-L74', -'ilm.explain_lifecycle.LifecycleExplainUnmanaged': 'ilm/explain_lifecycle/types.ts#L59-L62', 
+'ilm.explain_lifecycle.LifecycleExplain': 'ilm/explain_lifecycle/types.ts#L65-L68', +'ilm.explain_lifecycle.LifecycleExplainManaged': 'ilm/explain_lifecycle/types.ts#L27-L58', +'ilm.explain_lifecycle.LifecycleExplainPhaseExecution': 'ilm/explain_lifecycle/types.ts#L70-L75', +'ilm.explain_lifecycle.LifecycleExplainUnmanaged': 'ilm/explain_lifecycle/types.ts#L60-L63', 'ilm.explain_lifecycle.Request': 'ilm/explain_lifecycle/ExplainLifecycleRequest.ts#L24-L64', 'ilm.explain_lifecycle.Response': 'ilm/explain_lifecycle/ExplainLifecycleResponse.ts#L24-L28', 'ilm.get_lifecycle.Lifecycle': 'ilm/get_lifecycle/types.ts#L24-L28', @@ -1459,21 +1468,27 @@ 'ilm.stop.Response': 'ilm/stop/StopIlmResponse.ts#L22-L25', 'indices._types.Alias': 'indices/_types/Alias.ts#L23-L53', 'indices._types.AliasDefinition': 'indices/_types/AliasDefinition.ts#L22-L54', -'indices._types.CacheQueries': 'indices/_types/IndexSettings.ts#L421-L423', -'indices._types.DataStream': 'indices/_types/DataStream.ts#L53-L139', -'indices._types.DataStreamIndex': 'indices/_types/DataStream.ts#L148-L173', +'indices._types.CacheQueries': 'indices/_types/IndexSettings.ts#L423-L425', +'indices._types.DataStream': 'indices/_types/DataStream.ts#L54-L145', +'indices._types.DataStreamFailureStore': 'indices/_types/DataStreamFailureStore.ts#L22-L37', +'indices._types.DataStreamFailureStoreTemplate': 'indices/_types/DataStreamFailureStore.ts#L39-L54', +'indices._types.DataStreamIndex': 'indices/_types/DataStream.ts#L154-L179', 'indices._types.DataStreamLifecycle': 'indices/_types/DataStreamLifecycle.ts#L25-L45', 'indices._types.DataStreamLifecycleDownsampling': 'indices/_types/DataStreamLifecycleDownsampling.ts#L22-L27', 'indices._types.DataStreamLifecycleRolloverConditions': 'indices/_types/DataStreamLifecycle.ts#L60-L72', 'indices._types.DataStreamLifecycleWithRollover': 'indices/_types/DataStreamLifecycle.ts#L47-L58', -'indices._types.DataStreamTimestampField': 'indices/_types/DataStream.ts#L141-L146', 
-'indices._types.DataStreamVisibility': 'indices/_types/DataStream.ts#L175-L178', +'indices._types.DataStreamOptions': 'indices/_types/DataStreamOptions.ts#L25-L34', +'indices._types.DataStreamOptionsTemplate': 'indices/_types/DataStreamOptions.ts#L36-L41', +'indices._types.DataStreamTimestampField': 'indices/_types/DataStream.ts#L147-L152', +'indices._types.DataStreamVisibility': 'indices/_types/DataStream.ts#L181-L184', 'indices._types.DownsampleConfig': 'indices/_types/Downsample.ts#L22-L27', 'indices._types.DownsamplingRound': 'indices/_types/DownsamplingRound.ts#L23-L32', -'indices._types.FailureStore': 'indices/_types/DataStream.ts#L47-L51', +'indices._types.FailureStore': 'indices/_types/DataStream.ts#L48-L52', +'indices._types.FailureStoreLifecycle': 'indices/_types/DataStreamFailureStore.ts#L56-L72', +'indices._types.FailureStoreLifecycleTemplate': 'indices/_types/DataStreamFailureStore.ts#L74-L90', 'indices._types.FielddataFrequencyFilter': 'indices/_types/FielddataFrequencyFilter.ts#L22-L26', -'indices._types.IndexCheckOnStartup': 'indices/_types/IndexSettings.ts#L270-L277', -'indices._types.IndexMode': 'indices/_types/DataStream.ts#L40-L45', +'indices._types.IndexCheckOnStartup': 'indices/_types/IndexSettings.ts#L272-L279', +'indices._types.IndexMode': 'indices/_types/DataStream.ts#L41-L46', 'indices._types.IndexRouting': 'indices/_types/IndexRouting.ts#L22-L25', 'indices._types.IndexRoutingAllocation': 'indices/_types/IndexRouting.ts#L27-L32', 'indices._types.IndexRoutingAllocationDisk': 'indices/_types/IndexRouting.ts#L62-L64', @@ -1483,64 +1498,64 @@ 'indices._types.IndexRoutingRebalance': 'indices/_types/IndexRouting.ts#L34-L36', 'indices._types.IndexRoutingRebalanceOptions': 'indices/_types/IndexRouting.ts#L45-L50', 'indices._types.IndexSegmentSort': 'indices/_types/IndexSegmentSort.ts#L22-L27', -'indices._types.IndexSettingBlocks': 'indices/_types/IndexSettings.ts#L262-L268', -'indices._types.IndexSettings': 
'indices/_types/IndexSettings.ts#L70-L176', -'indices._types.IndexSettingsAnalysis': 'indices/_types/IndexSettings.ts#L333-L339', -'indices._types.IndexSettingsLifecycle': 'indices/_types/IndexSettings.ts#L284-L323', -'indices._types.IndexSettingsLifecycleStep': 'indices/_types/IndexSettings.ts#L325-L331', -'indices._types.IndexSettingsTimeSeries': 'indices/_types/IndexSettings.ts#L341-L344', +'indices._types.IndexSettingBlocks': 'indices/_types/IndexSettings.ts#L264-L270', +'indices._types.IndexSettings': 'indices/_types/IndexSettings.ts#L70-L178', +'indices._types.IndexSettingsAnalysis': 'indices/_types/IndexSettings.ts#L335-L341', +'indices._types.IndexSettingsLifecycle': 'indices/_types/IndexSettings.ts#L286-L325', +'indices._types.IndexSettingsLifecycleStep': 'indices/_types/IndexSettings.ts#L327-L333', +'indices._types.IndexSettingsTimeSeries': 'indices/_types/IndexSettings.ts#L343-L346', 'indices._types.IndexState': 'indices/_types/IndexState.ts#L27-L40', -'indices._types.IndexTemplate': 'indices/_types/IndexTemplate.ts#L28-L81', -'indices._types.IndexTemplateDataStreamConfiguration': 'indices/_types/IndexTemplate.ts#L83-L94', -'indices._types.IndexTemplateSummary': 'indices/_types/IndexTemplate.ts#L96-L118', -'indices._types.IndexVersioning': 'indices/_types/IndexSettings.ts#L279-L282', -'indices._types.IndexingPressure': 'indices/_types/IndexSettings.ts#L575-L577', -'indices._types.IndexingPressureMemory': 'indices/_types/IndexSettings.ts#L579-L586', -'indices._types.IndexingSlowlogSettings': 'indices/_types/IndexSettings.ts#L588-L593', -'indices._types.IndexingSlowlogTresholds': 'indices/_types/IndexSettings.ts#L595-L602', -'indices._types.ManagedBy': 'indices/_types/DataStream.ts#L32-L37', -'indices._types.MappingLimitSettings': 'indices/_types/IndexSettings.ts#L425-L439', -'indices._types.MappingLimitSettingsDepth': 'indices/_types/IndexSettings.ts#L460-L467', -'indices._types.MappingLimitSettingsDimensionFields': 
'indices/_types/IndexSettings.ts#L497-L503', -'indices._types.MappingLimitSettingsFieldNameLength': 'indices/_types/IndexSettings.ts#L488-L495', -'indices._types.MappingLimitSettingsNestedFields': 'indices/_types/IndexSettings.ts#L469-L477', -'indices._types.MappingLimitSettingsNestedObjects': 'indices/_types/IndexSettings.ts#L479-L486', -'indices._types.MappingLimitSettingsSourceFields': 'indices/_types/IndexSettings.ts#L505-L507', -'indices._types.MappingLimitSettingsTotalFields': 'indices/_types/IndexSettings.ts#L441-L458', -'indices._types.Merge': 'indices/_types/IndexSettings.ts#L346-L348', -'indices._types.MergeScheduler': 'indices/_types/IndexSettings.ts#L350-L353', +'indices._types.IndexTemplate': 'indices/_types/IndexTemplate.ts#L29-L82', +'indices._types.IndexTemplateDataStreamConfiguration': 'indices/_types/IndexTemplate.ts#L84-L95', +'indices._types.IndexTemplateSummary': 'indices/_types/IndexTemplate.ts#L97-L124', +'indices._types.IndexVersioning': 'indices/_types/IndexSettings.ts#L281-L284', +'indices._types.IndexingPressure': 'indices/_types/IndexSettings.ts#L577-L579', +'indices._types.IndexingPressureMemory': 'indices/_types/IndexSettings.ts#L581-L588', +'indices._types.IndexingSlowlogSettings': 'indices/_types/IndexSettings.ts#L590-L595', +'indices._types.IndexingSlowlogTresholds': 'indices/_types/IndexSettings.ts#L597-L604', +'indices._types.ManagedBy': 'indices/_types/DataStream.ts#L33-L38', +'indices._types.MappingLimitSettings': 'indices/_types/IndexSettings.ts#L427-L441', +'indices._types.MappingLimitSettingsDepth': 'indices/_types/IndexSettings.ts#L462-L469', +'indices._types.MappingLimitSettingsDimensionFields': 'indices/_types/IndexSettings.ts#L499-L505', +'indices._types.MappingLimitSettingsFieldNameLength': 'indices/_types/IndexSettings.ts#L490-L497', +'indices._types.MappingLimitSettingsNestedFields': 'indices/_types/IndexSettings.ts#L471-L479', +'indices._types.MappingLimitSettingsNestedObjects': 
'indices/_types/IndexSettings.ts#L481-L488', +'indices._types.MappingLimitSettingsSourceFields': 'indices/_types/IndexSettings.ts#L507-L509', +'indices._types.MappingLimitSettingsTotalFields': 'indices/_types/IndexSettings.ts#L443-L460', +'indices._types.Merge': 'indices/_types/IndexSettings.ts#L348-L350', +'indices._types.MergeScheduler': 'indices/_types/IndexSettings.ts#L352-L355', 'indices._types.NumericFielddata': 'indices/_types/NumericFielddata.ts#L22-L24', 'indices._types.NumericFielddataFormat': 'indices/_types/NumericFielddataFormat.ts#L20-L23', -'indices._types.Queries': 'indices/_types/IndexSettings.ts#L417-L419', +'indices._types.Queries': 'indices/_types/IndexSettings.ts#L419-L421', 'indices._types.RetentionLease': 'indices/_types/IndexSettings.ts#L66-L68', -'indices._types.SearchIdle': 'indices/_types/IndexSettings.ts#L253-L256', +'indices._types.SearchIdle': 'indices/_types/IndexSettings.ts#L255-L258', 'indices._types.SegmentSortMissing': 'indices/_types/IndexSegmentSort.ts#L43-L46', 'indices._types.SegmentSortMode': 'indices/_types/IndexSegmentSort.ts#L36-L41', 'indices._types.SegmentSortOrder': 'indices/_types/IndexSegmentSort.ts#L29-L34', -'indices._types.SettingsAnalyze': 'indices/_types/IndexSettings.ts#L243-L246', -'indices._types.SettingsHighlight': 'indices/_types/IndexSettings.ts#L238-L241', -'indices._types.SettingsQueryString': 'indices/_types/IndexSettings.ts#L258-L260', -'indices._types.SettingsSearch': 'indices/_types/IndexSettings.ts#L248-L251', -'indices._types.SettingsSimilarity': 'indices/_types/IndexSettings.ts#L178-L190', -'indices._types.SettingsSimilarityBm25': 'indices/_types/IndexSettings.ts#L196-L201', -'indices._types.SettingsSimilarityBoolean': 'indices/_types/IndexSettings.ts#L192-L194', -'indices._types.SettingsSimilarityDfi': 'indices/_types/IndexSettings.ts#L203-L206', -'indices._types.SettingsSimilarityDfr': 'indices/_types/IndexSettings.ts#L208-L213', -'indices._types.SettingsSimilarityIb': 
'indices/_types/IndexSettings.ts#L215-L220', -'indices._types.SettingsSimilarityLmd': 'indices/_types/IndexSettings.ts#L222-L225', -'indices._types.SettingsSimilarityLmj': 'indices/_types/IndexSettings.ts#L227-L230', -'indices._types.SettingsSimilarityScripted': 'indices/_types/IndexSettings.ts#L232-L236', -'indices._types.SlowlogSettings': 'indices/_types/IndexSettings.ts#L515-L520', -'indices._types.SlowlogTresholdLevels': 'indices/_types/IndexSettings.ts#L527-L532', -'indices._types.SlowlogTresholds': 'indices/_types/IndexSettings.ts#L522-L525', +'indices._types.SettingsAnalyze': 'indices/_types/IndexSettings.ts#L245-L248', +'indices._types.SettingsHighlight': 'indices/_types/IndexSettings.ts#L240-L243', +'indices._types.SettingsQueryString': 'indices/_types/IndexSettings.ts#L260-L262', +'indices._types.SettingsSearch': 'indices/_types/IndexSettings.ts#L250-L253', +'indices._types.SettingsSimilarity': 'indices/_types/IndexSettings.ts#L180-L192', +'indices._types.SettingsSimilarityBm25': 'indices/_types/IndexSettings.ts#L198-L203', +'indices._types.SettingsSimilarityBoolean': 'indices/_types/IndexSettings.ts#L194-L196', +'indices._types.SettingsSimilarityDfi': 'indices/_types/IndexSettings.ts#L205-L208', +'indices._types.SettingsSimilarityDfr': 'indices/_types/IndexSettings.ts#L210-L215', +'indices._types.SettingsSimilarityIb': 'indices/_types/IndexSettings.ts#L217-L222', +'indices._types.SettingsSimilarityLmd': 'indices/_types/IndexSettings.ts#L224-L227', +'indices._types.SettingsSimilarityLmj': 'indices/_types/IndexSettings.ts#L229-L232', +'indices._types.SettingsSimilarityScripted': 'indices/_types/IndexSettings.ts#L234-L238', +'indices._types.SlowlogSettings': 'indices/_types/IndexSettings.ts#L517-L522', +'indices._types.SlowlogTresholdLevels': 'indices/_types/IndexSettings.ts#L529-L534', +'indices._types.SlowlogTresholds': 'indices/_types/IndexSettings.ts#L524-L527', 'indices._types.SoftDeletes': 'indices/_types/IndexSettings.ts#L51-L64', 
-'indices._types.SourceMode': 'indices/_types/IndexSettings.ts#L509-L513', -'indices._types.Storage': 'indices/_types/IndexSettings.ts#L534-L543', -'indices._types.StorageType': 'indices/_types/IndexSettings.ts#L545-L573', +'indices._types.SourceMode': 'indices/_types/IndexSettings.ts#L511-L515', +'indices._types.Storage': 'indices/_types/IndexSettings.ts#L536-L545', +'indices._types.StorageType': 'indices/_types/IndexSettings.ts#L547-L575', 'indices._types.TemplateMapping': 'indices/_types/TemplateMapping.ts#L27-L34', -'indices._types.Translog': 'indices/_types/IndexSettings.ts#L355-L377', -'indices._types.TranslogDurability': 'indices/_types/IndexSettings.ts#L379-L394', -'indices._types.TranslogRetention': 'indices/_types/IndexSettings.ts#L396-L415', +'indices._types.Translog': 'indices/_types/IndexSettings.ts#L357-L379', +'indices._types.TranslogDurability': 'indices/_types/IndexSettings.ts#L381-L396', +'indices._types.TranslogRetention': 'indices/_types/IndexSettings.ts#L398-L417', 'indices.add_block.IndicesBlockOptions': 'indices/add_block/IndicesAddBlockRequest.ts#L91-L100', 'indices.add_block.IndicesBlockStatus': 'indices/add_block/IndicesAddBlockResponse.ts#L30-L33', 'indices.add_block.Request': 'indices/add_block/IndicesAddBlockRequest.ts#L24-L89', @@ -1555,13 +1570,13 @@ 'indices.analyze.TokenDetail': 'indices/analyze/types.ts#L71-L74', 'indices.cancel_migrate_reindex.Request': 'indices/cancel_migrate_reindex/MigrateCancelReindexRequest.ts#L23-L38', 'indices.cancel_migrate_reindex.Response': 'indices/cancel_migrate_reindex/MigrateCancelReindexResponse.ts#L22-L25', -'indices.clear_cache.Request': 'indices/clear_cache/IndicesClearCacheRequest.ts#L23-L100', +'indices.clear_cache.Request': 'indices/clear_cache/IndicesClearCacheRequest.ts#L23-L99', 'indices.clear_cache.Response': 'indices/clear_cache/IndicesClearCacheResponse.ts#L22-L25', 'indices.clone.Request': 'indices/clone/IndicesCloneRequest.ts#L27-L127', 'indices.clone.Response': 
'indices/clone/IndicesCloneResponse.ts#L22-L28', 'indices.close.CloseIndexResult': 'indices/close/CloseIndexResponse.ts#L32-L35', 'indices.close.CloseShardResult': 'indices/close/CloseIndexResponse.ts#L37-L39', -'indices.close.Request': 'indices/close/CloseIndexRequest.ts#L24-L101', +'indices.close.Request': 'indices/close/CloseIndexRequest.ts#L24-L100', 'indices.close.Response': 'indices/close/CloseIndexResponse.ts#L24-L30', 'indices.create.Request': 'indices/create/IndicesCreateRequest.ts#L28-L115', 'indices.create.Response': 'indices/create/IndicesCreateResponse.ts#L22-L28', @@ -1573,14 +1588,16 @@ 'indices.data_streams_stats.DataStreamsStatsItem': 'indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L45-L65', 'indices.data_streams_stats.Request': 'indices/data_streams_stats/IndicesDataStreamsStatsRequest.ts#L23-L61', 'indices.data_streams_stats.Response': 'indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L25-L43', -'indices.delete.Request': 'indices/delete/IndicesDeleteRequest.ts#L24-L87', +'indices.delete.Request': 'indices/delete/IndicesDeleteRequest.ts#L24-L86', 'indices.delete.Response': 'indices/delete/IndicesDeleteResponse.ts#L22-L25', 'indices.delete_alias.Request': 'indices/delete_alias/IndicesDeleteAliasRequest.ts#L24-L70', 'indices.delete_alias.Response': 'indices/delete_alias/IndicesDeleteAliasResponse.ts#L22-L25', -'indices.delete_data_lifecycle.Request': 'indices/delete_data_lifecycle/IndicesDeleteDataLifecycleRequest.ts#L24-L47', +'indices.delete_data_lifecycle.Request': 'indices/delete_data_lifecycle/IndicesDeleteDataLifecycleRequest.ts#L24-L48', 'indices.delete_data_lifecycle.Response': 'indices/delete_data_lifecycle/IndicesDeleteDataLifecycleResponse.ts#L22-L25', 'indices.delete_data_stream.Request': 'indices/delete_data_stream/IndicesDeleteDataStreamRequest.ts#L24-L59', 'indices.delete_data_stream.Response': 'indices/delete_data_stream/IndicesDeleteDataStreamResponse.ts#L22-L25', 
+'indices.delete_data_stream_options.Request': 'indices/delete_data_stream_options/IndicesDeleteDataStreamOptionsRequest.ts#L24-L46', +'indices.delete_data_stream_options.Response': 'indices/delete_data_stream_options/IndicesDeleteDataStreamOptionsResponse.ts#L22-L25', 'indices.delete_index_template.Request': 'indices/delete_index_template/IndicesDeleteIndexTemplateRequest.ts#L24-L60', 'indices.delete_index_template.Response': 'indices/delete_index_template/IndicesDeleteIndexTemplateResponse.ts#L22-L25', 'indices.delete_template.Request': 'indices/delete_template/IndicesDeleteTemplateRequest.ts#L24-L61', @@ -1589,12 +1606,12 @@ 'indices.disk_usage.Response': 'indices/disk_usage/IndicesDiskUsageResponse.ts#L22-L25', 'indices.downsample.Request': 'indices/downsample/Request.ts#L24-L58', 'indices.downsample.Response': 'indices/downsample/Response.ts#L22-L25', -'indices.exists.Request': 'indices/exists/IndicesExistsRequest.ts#L23-L80', -'indices.exists_alias.Request': 'indices/exists_alias/IndicesExistsAliasRequest.ts#L24-L82', +'indices.exists.Request': 'indices/exists/IndicesExistsRequest.ts#L23-L79', +'indices.exists_alias.Request': 'indices/exists_alias/IndicesExistsAliasRequest.ts#L24-L81', 'indices.exists_index_template.Request': 'indices/exists_index_template/IndicesExistsIndexTemplateRequest.ts#L24-L61', 'indices.exists_template.Request': 'indices/exists_template/IndicesExistsTemplateRequest.ts#L24-L69', 'indices.explain_data_lifecycle.DataStreamLifecycleExplain': 'indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L31-L41', -'indices.explain_data_lifecycle.Request': 'indices/explain_data_lifecycle/IndicesExplainDataLifecycleRequest.ts#L24-L47', +'indices.explain_data_lifecycle.Request': 'indices/explain_data_lifecycle/IndicesExplainDataLifecycleRequest.ts#L24-L48', 'indices.explain_data_lifecycle.Response': 'indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L25-L29', 'indices.field_usage_stats.FieldSummary': 
'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L58-L67', 'indices.field_usage_stats.FieldsUsageBody': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L33-L40', @@ -1604,7 +1621,7 @@ 'indices.field_usage_stats.ShardsStats': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L53-L56', 'indices.field_usage_stats.UsageStatsIndex': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L42-L44', 'indices.field_usage_stats.UsageStatsShards': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L46-L51', -'indices.flush.Request': 'indices/flush/IndicesFlushRequest.ts#L23-L92', +'indices.flush.Request': 'indices/flush/IndicesFlushRequest.ts#L23-L91', 'indices.flush.Response': 'indices/flush/IndicesFlushResponse.ts#L22-L25', 'indices.forcemerge.Request': 'indices/forcemerge/IndicesForceMergeRequest.ts#L24-L108', 'indices.forcemerge.Response': 'indices/forcemerge/IndicesForceMergeResponse.ts#L22-L25', @@ -1613,24 +1630,30 @@ 'indices.get.Request': 'indices/get/IndicesGetRequest.ts#L24-L96', 'indices.get.Response': 'indices/get/IndicesGetResponse.ts#L24-L27', 'indices.get_alias.IndexAliases': 'indices/get_alias/IndicesGetAliasResponse.ts#L37-L39', -'indices.get_alias.Request': 'indices/get_alias/IndicesGetAliasRequest.ts#L24-L93', +'indices.get_alias.Request': 'indices/get_alias/IndicesGetAliasRequest.ts#L24-L92', 'indices.get_alias.Response': 'indices/get_alias/IndicesGetAliasResponse.ts#L26-L35', 'indices.get_data_lifecycle.DataStreamWithLifecycle': 'indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L27-L30', 'indices.get_data_lifecycle.Request': 'indices/get_data_lifecycle/IndicesGetDataLifecycleRequest.ts#L24-L68', 'indices.get_data_lifecycle.Response': 'indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L23-L25', 'indices.get_data_lifecycle_stats.DataStreamStats': 'indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsResponse.ts#L46-L59', -'indices.get_data_lifecycle_stats.Request': 
'indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsRequest.ts#L22-L38', +'indices.get_data_lifecycle_stats.Request': 'indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsRequest.ts#L22-L39', 'indices.get_data_lifecycle_stats.Response': 'indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsResponse.ts#L24-L44', 'indices.get_data_stream.Request': 'indices/get_data_stream/IndicesGetDataStreamRequest.ts#L24-L78', 'indices.get_data_stream.Response': 'indices/get_data_stream/IndicesGetDataStreamResponse.ts#L22-L24', -'indices.get_field_mapping.Request': 'indices/get_field_mapping/IndicesGetFieldMappingRequest.ts#L23-L84', +'indices.get_data_stream_options.DataStreamWithOptions': 'indices/get_data_stream_options/IndicesGetDataStreamOptionsResponse.ts#L27-L30', +'indices.get_data_stream_options.Request': 'indices/get_data_stream_options/IndicesGetDataStreamOptionsRequest.ts#L24-L61', +'indices.get_data_stream_options.Response': 'indices/get_data_stream_options/IndicesGetDataStreamOptionsResponse.ts#L23-L25', +'indices.get_data_stream_settings.DataStreamSettings': 'indices/get_data_stream_settings/IndicesGetDataStreamSettingsResponse.ts#L29-L39', +'indices.get_data_stream_settings.Request': 'indices/get_data_stream_settings/IndicesGetDataStreamSettingsRequest.ts#L24-L57', +'indices.get_data_stream_settings.Response': 'indices/get_data_stream_settings/IndicesGetDataStreamSettingsResponse.ts#L22-L27', +'indices.get_field_mapping.Request': 'indices/get_field_mapping/IndicesGetFieldMappingRequest.ts#L23-L83', 'indices.get_field_mapping.Response': 'indices/get_field_mapping/IndicesGetFieldMappingResponse.ts#L24-L27', 'indices.get_field_mapping.TypeFieldMappings': 'indices/get_field_mapping/types.ts#L24-L26', 'indices.get_index_template.IndexTemplateItem': 'indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L29-L32', 'indices.get_index_template.Request': 'indices/get_index_template/IndicesGetIndexTemplateRequest.ts#L24-L72', 
'indices.get_index_template.Response': 'indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L23-L27', 'indices.get_mapping.IndexMappingRecord': 'indices/get_mapping/IndicesGetMappingResponse.ts#L29-L32', -'indices.get_mapping.Request': 'indices/get_mapping/IndicesGetMappingRequest.ts#L24-L85', +'indices.get_mapping.Request': 'indices/get_mapping/IndicesGetMappingRequest.ts#L24-L84', 'indices.get_mapping.Response': 'indices/get_mapping/IndicesGetMappingResponse.ts#L24-L27', 'indices.get_migrate_reindex_status.Request': 'indices/get_migrate_reindex_status/MigrateGetReindexStatusRequest.ts#L23-L38', 'indices.get_migrate_reindex_status.Response': 'indices/get_migrate_reindex_status/MigrateGetReindexStatusResponse.ts#L23-L36', @@ -1651,7 +1674,7 @@ 'indices.modify_data_stream.IndexAndDataStreamAction': 'indices/modify_data_stream/types.ts#L39-L44', 'indices.modify_data_stream.Request': 'indices/modify_data_stream/IndicesModifyDataStreamRequest.ts#L23-L45', 'indices.modify_data_stream.Response': 'indices/modify_data_stream/IndicesModifyDataStreamResponse.ts#L22-L25', -'indices.open.Request': 'indices/open/IndicesOpenRequest.ts#L24-L111', +'indices.open.Request': 'indices/open/IndicesOpenRequest.ts#L24-L110', 'indices.open.Response': 'indices/open/IndicesOpenResponse.ts#L20-L25', 'indices.promote_data_stream.Request': 'indices/promote_data_stream/IndicesPromoteDataStreamRequest.ts#L24-L58', 'indices.promote_data_stream.Response': 'indices/promote_data_stream/IndicesPromoteDataStreamResponse.ts#L22-L25', @@ -1659,12 +1682,19 @@ 'indices.put_alias.Response': 'indices/put_alias/IndicesPutAliasResponse.ts#L22-L25', 'indices.put_data_lifecycle.Request': 'indices/put_data_lifecycle/IndicesPutDataLifecycleRequest.ts#L25-L93', 'indices.put_data_lifecycle.Response': 'indices/put_data_lifecycle/IndicesPutDataLifecycleResponse.ts#L22-L25', +'indices.put_data_stream_options.Request': 'indices/put_data_stream_options/IndicesPutDataStreamOptionsRequest.ts#L25-L79', 
+'indices.put_data_stream_options.Response': 'indices/put_data_stream_options/IndicesPutDataStreamOptionsResponse.ts#L22-L25', +'indices.put_data_stream_settings.DataStreamSettingsError': 'indices/put_data_stream_settings/IndicesPutDataStreamSettingsResponse.ts#L71-L77', +'indices.put_data_stream_settings.IndexSettingResults': 'indices/put_data_stream_settings/IndicesPutDataStreamSettingsResponse.ts#L57-L69', +'indices.put_data_stream_settings.Request': 'indices/put_data_stream_settings/IndicesPutDataStreamSettingsRequest.ts#L25-L77', +'indices.put_data_stream_settings.Response': 'indices/put_data_stream_settings/IndicesPutDataStreamSettingsResponse.ts#L23-L28', +'indices.put_data_stream_settings.UpdatedDataStreamSettings': 'indices/put_data_stream_settings/IndicesPutDataStreamSettingsResponse.ts#L30-L55', 'indices.put_index_template.IndexTemplateMapping': 'indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L159-L181', 'indices.put_index_template.Request': 'indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L37-L157', 'indices.put_index_template.Response': 'indices/put_index_template/IndicesPutIndexTemplateResponse.ts#L22-L25', -'indices.put_mapping.Request': 'indices/put_mapping/IndicesPutMappingRequest.ts#L41-L181', +'indices.put_mapping.Request': 'indices/put_mapping/IndicesPutMappingRequest.ts#L41-L164', 'indices.put_mapping.Response': 'indices/put_mapping/IndicesPutMappingResponse.ts#L22-L25', -'indices.put_settings.Request': 'indices/put_settings/IndicesPutSettingsRequest.ts#L25-L125', +'indices.put_settings.Request': 'indices/put_settings/IndicesPutSettingsRequest.ts#L25-L165', 'indices.put_settings.Response': 'indices/put_settings/IndicesPutSettingsResponse.ts#L22-L25', 'indices.put_template.Request': 'indices/put_template/IndicesPutTemplateRequest.ts#L29-L124', 'indices.put_template.Response': 'indices/put_template/IndicesPutTemplateResponse.ts#L22-L25', @@ -1680,16 +1710,16 @@ 'indices.recovery.ShardRecovery': 
'indices/recovery/types.ts#L118-L135', 'indices.recovery.TranslogStatus': 'indices/recovery/types.ts#L102-L109', 'indices.recovery.VerifyIndex': 'indices/recovery/types.ts#L111-L116', -'indices.refresh.Request': 'indices/refresh/IndicesRefreshRequest.ts#L23-L84', +'indices.refresh.Request': 'indices/refresh/IndicesRefreshRequest.ts#L23-L83', 'indices.refresh.Response': 'indices/refresh/IndicesRefreshResponse.ts#L22-L25', 'indices.reload_search_analyzers.ReloadDetails': 'indices/reload_search_analyzers/types.ts#L27-L31', 'indices.reload_search_analyzers.ReloadResult': 'indices/reload_search_analyzers/types.ts#L22-L25', 'indices.reload_search_analyzers.Request': 'indices/reload_search_analyzers/ReloadSearchAnalyzersRequest.ts#L23-L63', 'indices.reload_search_analyzers.Response': 'indices/reload_search_analyzers/ReloadSearchAnalyzersResponse.ts#L22-L25', -'indices.resolve_cluster.Request': 'indices/resolve_cluster/ResolveClusterRequest.ts#L24-L144', +'indices.resolve_cluster.Request': 'indices/resolve_cluster/ResolveClusterRequest.ts#L24-L143', 'indices.resolve_cluster.ResolveClusterInfo': 'indices/resolve_cluster/ResolveClusterResponse.ts#L29-L55', 'indices.resolve_cluster.Response': 'indices/resolve_cluster/ResolveClusterResponse.ts#L24-L27', -'indices.resolve_index.Request': 'indices/resolve_index/ResolveIndexRequest.ts#L23-L69', +'indices.resolve_index.Request': 'indices/resolve_index/ResolveIndexRequest.ts#L23-L68', 'indices.resolve_index.ResolveIndexAliasItem': 'indices/resolve_index/ResolveIndexResponse.ts#L37-L40', 'indices.resolve_index.ResolveIndexDataStreamsItem': 'indices/resolve_index/ResolveIndexResponse.ts#L42-L46', 'indices.resolve_index.ResolveIndexItem': 'indices/resolve_index/ResolveIndexResponse.ts#L30-L35', @@ -1698,7 +1728,7 @@ 'indices.rollover.Response': 'indices/rollover/IndicesRolloverResponse.ts#L22-L32', 'indices.rollover.RolloverConditions': 'indices/rollover/types.ts#L24-L40', 'indices.segments.IndexSegment': 
'indices/segments/types.ts#L24-L26', -'indices.segments.Request': 'indices/segments/IndicesSegmentsRequest.ts#L23-L73', +'indices.segments.Request': 'indices/segments/IndicesSegmentsRequest.ts#L23-L72', 'indices.segments.Response': 'indices/segments/IndicesSegmentsResponse.ts#L24-L29', 'indices.segments.Segment': 'indices/segments/types.ts#L28-L38', 'indices.segments.ShardSegmentRouting': 'indices/segments/types.ts#L40-L44', @@ -1746,104 +1776,135 @@ 'indices.update_aliases.Request': 'indices/update_aliases/IndicesUpdateAliasesRequest.ts#L24-L59', 'indices.update_aliases.Response': 'indices/update_aliases/IndicesUpdateAliasesResponse.ts#L22-L25', 'indices.validate_query.IndicesValidationExplanation': 'indices/validate_query/IndicesValidateQueryResponse.ts#L32-L37', -'indices.validate_query.Request': 'indices/validate_query/IndicesValidateQueryRequest.ts#L25-L123', +'indices.validate_query.Request': 'indices/validate_query/IndicesValidateQueryRequest.ts#L25-L122', 'indices.validate_query.Response': 'indices/validate_query/IndicesValidateQueryResponse.ts#L23-L30', -'inference._types.AdaptiveAllocations': 'inference/_types/CommonTypes.ts#L62-L79', -'inference._types.AlibabaCloudServiceSettings': 'inference/_types/CommonTypes.ts#L221-L266', -'inference._types.AlibabaCloudServiceType': 'inference/_types/CommonTypes.ts#L291-L293', -'inference._types.AlibabaCloudTaskSettings': 'inference/_types/CommonTypes.ts#L268-L282', -'inference._types.AlibabaCloudTaskType': 'inference/_types/CommonTypes.ts#L284-L289', -'inference._types.AmazonBedrockServiceSettings': 'inference/_types/CommonTypes.ts#L295-L337', -'inference._types.AmazonBedrockServiceType': 'inference/_types/CommonTypes.ts#L370-L372', -'inference._types.AmazonBedrockTaskSettings': 'inference/_types/CommonTypes.ts#L339-L363', -'inference._types.AmazonBedrockTaskType': 'inference/_types/CommonTypes.ts#L365-L368', -'inference._types.AnthropicServiceSettings': 'inference/_types/CommonTypes.ts#L374-L390', 
-'inference._types.AnthropicServiceType': 'inference/_types/CommonTypes.ts#L423-L425', -'inference._types.AnthropicTaskSettings': 'inference/_types/CommonTypes.ts#L392-L417', -'inference._types.AnthropicTaskType': 'inference/_types/CommonTypes.ts#L419-L421', -'inference._types.AzureAiStudioServiceSettings': 'inference/_types/CommonTypes.ts#L427-L469', -'inference._types.AzureAiStudioServiceType': 'inference/_types/CommonTypes.ts#L506-L508', -'inference._types.AzureAiStudioTaskSettings': 'inference/_types/CommonTypes.ts#L471-L499', -'inference._types.AzureAiStudioTaskType': 'inference/_types/CommonTypes.ts#L501-L504', -'inference._types.AzureOpenAIServiceSettings': 'inference/_types/CommonTypes.ts#L510-L555', -'inference._types.AzureOpenAIServiceType': 'inference/_types/CommonTypes.ts#L570-L572', -'inference._types.AzureOpenAITaskSettings': 'inference/_types/CommonTypes.ts#L557-L563', -'inference._types.AzureOpenAITaskType': 'inference/_types/CommonTypes.ts#L565-L568', -'inference._types.CohereEmbeddingType': 'inference/_types/CommonTypes.ts#L627-L631', -'inference._types.CohereInputType': 'inference/_types/CommonTypes.ts#L633-L638', -'inference._types.CohereServiceSettings': 'inference/_types/CommonTypes.ts#L574-L615', -'inference._types.CohereServiceType': 'inference/_types/CommonTypes.ts#L623-L625', -'inference._types.CohereSimilarityType': 'inference/_types/CommonTypes.ts#L640-L644', -'inference._types.CohereTaskSettings': 'inference/_types/CommonTypes.ts#L652-L684', -'inference._types.CohereTaskType': 'inference/_types/CommonTypes.ts#L617-L621', -'inference._types.CohereTruncateType': 'inference/_types/CommonTypes.ts#L646-L650', +'inference._types.AdaptiveAllocations': 'inference/_types/CommonTypes.ts#L99-L116', +'inference._types.AlibabaCloudServiceSettings': 'inference/_types/CommonTypes.ts#L292-L337', +'inference._types.AlibabaCloudServiceType': 'inference/_types/CommonTypes.ts#L362-L364', +'inference._types.AlibabaCloudTaskSettings': 
'inference/_types/CommonTypes.ts#L339-L353', +'inference._types.AlibabaCloudTaskType': 'inference/_types/CommonTypes.ts#L355-L360', +'inference._types.AmazonBedrockServiceSettings': 'inference/_types/CommonTypes.ts#L366-L408', +'inference._types.AmazonBedrockServiceType': 'inference/_types/CommonTypes.ts#L441-L443', +'inference._types.AmazonBedrockTaskSettings': 'inference/_types/CommonTypes.ts#L410-L434', +'inference._types.AmazonBedrockTaskType': 'inference/_types/CommonTypes.ts#L436-L439', +'inference._types.AnthropicServiceSettings': 'inference/_types/CommonTypes.ts#L445-L461', +'inference._types.AnthropicServiceType': 'inference/_types/CommonTypes.ts#L494-L496', +'inference._types.AnthropicTaskSettings': 'inference/_types/CommonTypes.ts#L463-L488', +'inference._types.AnthropicTaskType': 'inference/_types/CommonTypes.ts#L490-L492', +'inference._types.AzureAiStudioServiceSettings': 'inference/_types/CommonTypes.ts#L498-L540', +'inference._types.AzureAiStudioServiceType': 'inference/_types/CommonTypes.ts#L577-L579', +'inference._types.AzureAiStudioTaskSettings': 'inference/_types/CommonTypes.ts#L542-L570', +'inference._types.AzureAiStudioTaskType': 'inference/_types/CommonTypes.ts#L572-L575', +'inference._types.AzureOpenAIServiceSettings': 'inference/_types/CommonTypes.ts#L581-L626', +'inference._types.AzureOpenAIServiceType': 'inference/_types/CommonTypes.ts#L641-L643', +'inference._types.AzureOpenAITaskSettings': 'inference/_types/CommonTypes.ts#L628-L634', +'inference._types.AzureOpenAITaskType': 'inference/_types/CommonTypes.ts#L636-L639', +'inference._types.CohereEmbeddingType': 'inference/_types/CommonTypes.ts#L700-L706', +'inference._types.CohereInputType': 'inference/_types/CommonTypes.ts#L708-L713', +'inference._types.CohereServiceSettings': 'inference/_types/CommonTypes.ts#L645-L688', +'inference._types.CohereServiceType': 'inference/_types/CommonTypes.ts#L696-L698', +'inference._types.CohereSimilarityType': 'inference/_types/CommonTypes.ts#L715-L719', 
+'inference._types.CohereTaskSettings': 'inference/_types/CommonTypes.ts#L727-L759', +'inference._types.CohereTaskType': 'inference/_types/CommonTypes.ts#L690-L694', +'inference._types.CohereTruncateType': 'inference/_types/CommonTypes.ts#L721-L725', 'inference._types.CompletionInferenceResult': 'inference/_types/Results.ts#L84-L89', 'inference._types.CompletionResult': 'inference/_types/Results.ts#L77-L82', -'inference._types.CompletionTool': 'inference/_types/CommonTypes.ts#L207-L219', -'inference._types.CompletionToolChoice': 'inference/_types/CommonTypes.ts#L170-L182', -'inference._types.CompletionToolChoiceFunction': 'inference/_types/CommonTypes.ts#L159-L168', -'inference._types.CompletionToolFunction': 'inference/_types/CommonTypes.ts#L184-L205', -'inference._types.CompletionToolType': 'inference/_types/CommonTypes.ts#L81-L84', -'inference._types.ContentObject': 'inference/_types/CommonTypes.ts#L86-L98', +'inference._types.CompletionTool': 'inference/_types/CommonTypes.ts#L278-L290', +'inference._types.CompletionToolChoice': 'inference/_types/CommonTypes.ts#L241-L253', +'inference._types.CompletionToolChoiceFunction': 'inference/_types/CommonTypes.ts#L230-L239', +'inference._types.CompletionToolFunction': 'inference/_types/CommonTypes.ts#L255-L276', +'inference._types.CompletionToolType': 'inference/_types/CommonTypes.ts#L118-L121', +'inference._types.ContentObject': 'inference/_types/CommonTypes.ts#L123-L135', 'inference._types.DeleteInferenceEndpointResult': 'inference/_types/Results.ts#L110-L115', -'inference._types.ElasticsearchServiceSettings': 'inference/_types/CommonTypes.ts#L706-L740', -'inference._types.ElasticsearchServiceType': 'inference/_types/CommonTypes.ts#L756-L758', -'inference._types.ElasticsearchTaskSettings': 'inference/_types/CommonTypes.ts#L742-L748', -'inference._types.ElasticsearchTaskType': 'inference/_types/CommonTypes.ts#L750-L754', -'inference._types.ElserServiceSettings': 'inference/_types/CommonTypes.ts#L760-L786', 
-'inference._types.ElserServiceType': 'inference/_types/CommonTypes.ts#L792-L794', -'inference._types.ElserTaskType': 'inference/_types/CommonTypes.ts#L788-L790', -'inference._types.GoogleAiServiceType': 'inference/_types/CommonTypes.ts#L819-L821', -'inference._types.GoogleAiStudioServiceSettings': 'inference/_types/CommonTypes.ts#L796-L812', -'inference._types.GoogleAiStudioTaskType': 'inference/_types/CommonTypes.ts#L814-L817', -'inference._types.GoogleVertexAIServiceSettings': 'inference/_types/CommonTypes.ts#L823-L849', -'inference._types.GoogleVertexAIServiceType': 'inference/_types/CommonTypes.ts#L867-L869', -'inference._types.GoogleVertexAITaskSettings': 'inference/_types/CommonTypes.ts#L851-L860', -'inference._types.GoogleVertexAITaskType': 'inference/_types/CommonTypes.ts#L862-L865', -'inference._types.HuggingFaceServiceSettings': 'inference/_types/CommonTypes.ts#L871-L892', -'inference._types.HuggingFaceServiceType': 'inference/_types/CommonTypes.ts#L898-L900', -'inference._types.HuggingFaceTaskType': 'inference/_types/CommonTypes.ts#L894-L896', -'inference._types.InferenceChunkingSettings': 'inference/_types/Services.ts#L71-L100', -'inference._types.InferenceEndpoint': 'inference/_types/Services.ts#L24-L44', -'inference._types.InferenceEndpointInfo': 'inference/_types/Services.ts#L46-L58', -'inference._types.InferenceEndpointInfoJinaAi': 'inference/_types/Services.ts#L60-L69', +'inference._types.ElasticsearchServiceSettings': 'inference/_types/CommonTypes.ts#L781-L815', +'inference._types.ElasticsearchServiceType': 'inference/_types/CommonTypes.ts#L831-L833', +'inference._types.ElasticsearchTaskSettings': 'inference/_types/CommonTypes.ts#L817-L823', +'inference._types.ElasticsearchTaskType': 'inference/_types/CommonTypes.ts#L825-L829', +'inference._types.ElserServiceSettings': 'inference/_types/CommonTypes.ts#L835-L861', +'inference._types.ElserServiceType': 'inference/_types/CommonTypes.ts#L867-L869', +'inference._types.ElserTaskType': 
'inference/_types/CommonTypes.ts#L863-L865', +'inference._types.GoogleAiServiceType': 'inference/_types/CommonTypes.ts#L894-L896', +'inference._types.GoogleAiStudioServiceSettings': 'inference/_types/CommonTypes.ts#L871-L887', +'inference._types.GoogleAiStudioTaskType': 'inference/_types/CommonTypes.ts#L889-L892', +'inference._types.GoogleVertexAIServiceSettings': 'inference/_types/CommonTypes.ts#L898-L924', +'inference._types.GoogleVertexAIServiceType': 'inference/_types/CommonTypes.ts#L944-L946', +'inference._types.GoogleVertexAITaskSettings': 'inference/_types/CommonTypes.ts#L926-L935', +'inference._types.GoogleVertexAITaskType': 'inference/_types/CommonTypes.ts#L937-L942', +'inference._types.HuggingFaceServiceSettings': 'inference/_types/CommonTypes.ts#L948-L980', +'inference._types.HuggingFaceServiceType': 'inference/_types/CommonTypes.ts#L1001-L1003', +'inference._types.HuggingFaceTaskSettings': 'inference/_types/CommonTypes.ts#L982-L992', +'inference._types.HuggingFaceTaskType': 'inference/_types/CommonTypes.ts#L994-L999', +'inference._types.InferenceChunkingSettings': 'inference/_types/Services.ts#L254-L283', +'inference._types.InferenceEndpoint': 'inference/_types/Services.ts#L42-L62', +'inference._types.InferenceEndpointInfo': 'inference/_types/Services.ts#L64-L76', +'inference._types.InferenceEndpointInfoAlibabaCloudAI': 'inference/_types/Services.ts#L89-L98', +'inference._types.InferenceEndpointInfoAmazonBedrock': 'inference/_types/Services.ts#L100-L109', +'inference._types.InferenceEndpointInfoAnthropic': 'inference/_types/Services.ts#L111-L120', +'inference._types.InferenceEndpointInfoAzureAIStudio': 'inference/_types/Services.ts#L122-L131', +'inference._types.InferenceEndpointInfoAzureOpenAI': 'inference/_types/Services.ts#L133-L142', +'inference._types.InferenceEndpointInfoCohere': 'inference/_types/Services.ts#L144-L153', +'inference._types.InferenceEndpointInfoELSER': 'inference/_types/Services.ts#L166-L175', 
+'inference._types.InferenceEndpointInfoElasticsearch': 'inference/_types/Services.ts#L155-L164', +'inference._types.InferenceEndpointInfoGoogleAIStudio': 'inference/_types/Services.ts#L177-L186', +'inference._types.InferenceEndpointInfoGoogleVertexAI': 'inference/_types/Services.ts#L188-L197', +'inference._types.InferenceEndpointInfoHuggingFace': 'inference/_types/Services.ts#L199-L208', +'inference._types.InferenceEndpointInfoJinaAi': 'inference/_types/Services.ts#L78-L87', +'inference._types.InferenceEndpointInfoMistral': 'inference/_types/Services.ts#L210-L219', +'inference._types.InferenceEndpointInfoOpenAI': 'inference/_types/Services.ts#L221-L230', +'inference._types.InferenceEndpointInfoVoyageAI': 'inference/_types/Services.ts#L232-L241', +'inference._types.InferenceEndpointInfoWatsonx': 'inference/_types/Services.ts#L243-L252', 'inference._types.InferenceResult': 'inference/_types/Results.ts#L117-L128', -'inference._types.JinaAIServiceSettings': 'inference/_types/CommonTypes.ts#L902-L931', -'inference._types.JinaAIServiceType': 'inference/_types/CommonTypes.ts#L961-L963', -'inference._types.JinaAISimilarityType': 'inference/_types/CommonTypes.ts#L965-L969', -'inference._types.JinaAITaskSettings': 'inference/_types/CommonTypes.ts#L933-L954', -'inference._types.JinaAITaskType': 'inference/_types/CommonTypes.ts#L956-L959', -'inference._types.JinaAITextEmbeddingTask': 'inference/_types/CommonTypes.ts#L971-L976', -'inference._types.Message': 'inference/_types/CommonTypes.ts#L137-L157', -'inference._types.MessageContent': 'inference/_types/CommonTypes.ts#L132-L135', -'inference._types.MistralServiceSettings': 'inference/_types/CommonTypes.ts#L978-L1005', -'inference._types.MistralServiceType': 'inference/_types/CommonTypes.ts#L1011-L1013', -'inference._types.MistralTaskType': 'inference/_types/CommonTypes.ts#L1007-L1009', -'inference._types.OpenAIServiceSettings': 'inference/_types/CommonTypes.ts#L1015-L1057', -'inference._types.OpenAIServiceType': 
'inference/_types/CommonTypes.ts#L1073-L1075', -'inference._types.OpenAITaskSettings': 'inference/_types/CommonTypes.ts#L1059-L1065', -'inference._types.OpenAITaskType': 'inference/_types/CommonTypes.ts#L1067-L1071', +'inference._types.JinaAIServiceSettings': 'inference/_types/CommonTypes.ts#L1005-L1034', +'inference._types.JinaAIServiceType': 'inference/_types/CommonTypes.ts#L1064-L1066', +'inference._types.JinaAISimilarityType': 'inference/_types/CommonTypes.ts#L1068-L1072', +'inference._types.JinaAITaskSettings': 'inference/_types/CommonTypes.ts#L1036-L1057', +'inference._types.JinaAITaskType': 'inference/_types/CommonTypes.ts#L1059-L1062', +'inference._types.JinaAITextEmbeddingTask': 'inference/_types/CommonTypes.ts#L1074-L1079', +'inference._types.Message': 'inference/_types/CommonTypes.ts#L174-L228', +'inference._types.MessageContent': 'inference/_types/CommonTypes.ts#L169-L172', +'inference._types.MistralServiceSettings': 'inference/_types/CommonTypes.ts#L1081-L1108', +'inference._types.MistralServiceType': 'inference/_types/CommonTypes.ts#L1116-L1118', +'inference._types.MistralTaskType': 'inference/_types/CommonTypes.ts#L1110-L1114', +'inference._types.OpenAIServiceSettings': 'inference/_types/CommonTypes.ts#L1120-L1162', +'inference._types.OpenAIServiceType': 'inference/_types/CommonTypes.ts#L1178-L1180', +'inference._types.OpenAITaskSettings': 'inference/_types/CommonTypes.ts#L1164-L1170', +'inference._types.OpenAITaskType': 'inference/_types/CommonTypes.ts#L1172-L1176', 'inference._types.RankedDocument': 'inference/_types/Results.ts#L91-L101', -'inference._types.RateLimitSetting': 'inference/_types/Services.ts#L106-L111', -'inference._types.RequestChatCompletion': 'inference/_types/CommonTypes.ts#L25-L60', +'inference._types.RateLimitSetting': 'inference/_types/Services.ts#L289-L315', +'inference._types.RequestChatCompletion': 'inference/_types/CommonTypes.ts#L25-L97', 'inference._types.RerankedInferenceResult': 'inference/_types/Results.ts#L103-L108', 
'inference._types.SparseEmbeddingInferenceResult': 'inference/_types/Results.ts#L40-L45', 'inference._types.SparseEmbeddingResult': 'inference/_types/Results.ts#L36-L38', 'inference._types.TaskType': 'inference/_types/TaskType.ts#L20-L29', +'inference._types.TaskTypeAlibabaCloudAI': 'inference/_types/TaskType.ts#L36-L41', +'inference._types.TaskTypeAmazonBedrock': 'inference/_types/TaskType.ts#L43-L46', +'inference._types.TaskTypeAnthropic': 'inference/_types/TaskType.ts#L48-L50', +'inference._types.TaskTypeAzureAIStudio': 'inference/_types/TaskType.ts#L52-L55', +'inference._types.TaskTypeAzureOpenAI': 'inference/_types/TaskType.ts#L57-L60', +'inference._types.TaskTypeCohere': 'inference/_types/TaskType.ts#L62-L66', +'inference._types.TaskTypeELSER': 'inference/_types/TaskType.ts#L74-L76', +'inference._types.TaskTypeElasticsearch': 'inference/_types/TaskType.ts#L68-L72', +'inference._types.TaskTypeGoogleAIStudio': 'inference/_types/TaskType.ts#L78-L81', +'inference._types.TaskTypeGoogleVertexAI': 'inference/_types/TaskType.ts#L83-L86', +'inference._types.TaskTypeHuggingFace': 'inference/_types/TaskType.ts#L88-L93', 'inference._types.TaskTypeJinaAi': 'inference/_types/TaskType.ts#L31-L34', +'inference._types.TaskTypeMistral': 'inference/_types/TaskType.ts#L95-L99', +'inference._types.TaskTypeOpenAI': 'inference/_types/TaskType.ts#L101-L105', +'inference._types.TaskTypeVoyageAI': 'inference/_types/TaskType.ts#L107-L110', +'inference._types.TaskTypeWatsonx': 'inference/_types/TaskType.ts#L112-L116', 'inference._types.TextEmbeddingByteResult': 'inference/_types/Results.ts#L53-L58', 'inference._types.TextEmbeddingInferenceResult': 'inference/_types/Results.ts#L67-L75', 'inference._types.TextEmbeddingResult': 'inference/_types/Results.ts#L60-L65', -'inference._types.ToolCall': 'inference/_types/CommonTypes.ts#L114-L130', -'inference._types.ToolCallFunction': 'inference/_types/CommonTypes.ts#L100-L112', -'inference._types.VoyageAIServiceSettings': 
'inference/_types/CommonTypes.ts#L1077-L1108', -'inference._types.VoyageAIServiceType': 'inference/_types/CommonTypes.ts#L1141-L1143', -'inference._types.VoyageAITaskSettings': 'inference/_types/CommonTypes.ts#L1110-L1134', -'inference._types.VoyageAITaskType': 'inference/_types/CommonTypes.ts#L1136-L1139', -'inference._types.WatsonxServiceSettings': 'inference/_types/CommonTypes.ts#L1145-L1182', -'inference._types.WatsonxServiceType': 'inference/_types/CommonTypes.ts#L1188-L1190', -'inference._types.WatsonxTaskType': 'inference/_types/CommonTypes.ts#L1184-L1186', -'inference.chat_completion_unified.Request': 'inference/chat_completion_unified/UnifiedRequest.ts#L24-L64', +'inference._types.ToolCall': 'inference/_types/CommonTypes.ts#L151-L167', +'inference._types.ToolCallFunction': 'inference/_types/CommonTypes.ts#L137-L149', +'inference._types.VoyageAIServiceSettings': 'inference/_types/CommonTypes.ts#L1182-L1213', +'inference._types.VoyageAIServiceType': 'inference/_types/CommonTypes.ts#L1246-L1248', +'inference._types.VoyageAITaskSettings': 'inference/_types/CommonTypes.ts#L1215-L1239', +'inference._types.VoyageAITaskType': 'inference/_types/CommonTypes.ts#L1241-L1244', +'inference._types.WatsonxServiceSettings': 'inference/_types/CommonTypes.ts#L1250-L1288', +'inference._types.WatsonxServiceType': 'inference/_types/CommonTypes.ts#L1296-L1298', +'inference._types.WatsonxTaskType': 'inference/_types/CommonTypes.ts#L1290-L1294', +'inference.chat_completion_unified.Request': 'inference/chat_completion_unified/UnifiedRequest.ts#L24-L61', 'inference.chat_completion_unified.Response': 'inference/chat_completion_unified/UnifiedResponse.ts#L22-L25', 'inference.completion.Request': 'inference/completion/CompletionRequest.ts#L25-L63', 'inference.completion.Response': 'inference/completion/CompletionResponse.ts#L22-L25', @@ -1853,7 +1914,7 @@ 'inference.get.Response': 'inference/get/GetResponse.ts#L22-L26', 'inference.inference.Request': 
'inference/inference/InferenceRequest.ts#L26-L91', 'inference.inference.Response': 'inference/inference/InferenceResponse.ts#L22-L25', -'inference.put.Request': 'inference/put/PutRequest.ts#L25-L60', +'inference.put.Request': 'inference/put/PutRequest.ts#L25-L78', 'inference.put.Response': 'inference/put/PutResponse.ts#L22-L25', 'inference.put_alibabacloud.Request': 'inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L30-L77', 'inference.put_alibabacloud.Response': 'inference/put_alibabacloud/PutAlibabaCloudResponse.ts#L22-L25', @@ -1875,17 +1936,17 @@ 'inference.put_googleaistudio.Response': 'inference/put_googleaistudio/PutGoogleAiStudioResponse.ts#L22-L25', 'inference.put_googlevertexai.Request': 'inference/put_googlevertexai/PutGoogleVertexAiRequest.ts#L30-L77', 'inference.put_googlevertexai.Response': 'inference/put_googlevertexai/PutGoogleVertexAiResponse.ts#L22-L25', -'inference.put_hugging_face.Request': 'inference/put_hugging_face/PutHuggingFaceRequest.ts#L29-L85', +'inference.put_hugging_face.Request': 'inference/put_hugging_face/PutHuggingFaceRequest.ts#L30-L113', 'inference.put_hugging_face.Response': 'inference/put_hugging_face/PutHuggingFaceResponse.ts#L22-L25', 'inference.put_jinaai.Request': 'inference/put_jinaai/PutJinaAiRequest.ts#L30-L80', 'inference.put_jinaai.Response': 'inference/put_jinaai/PutJinaAiResponse.ts#L22-L25', -'inference.put_mistral.Request': 'inference/put_mistral/PutMistralRequest.ts#L29-L72', +'inference.put_mistral.Request': 'inference/put_mistral/PutMistralRequest.ts#L29-L71', 'inference.put_mistral.Response': 'inference/put_mistral/PutMistralResponse.ts#L22-L25', 'inference.put_openai.Request': 'inference/put_openai/PutOpenAiRequest.ts#L30-L78', 'inference.put_openai.Response': 'inference/put_openai/PutOpenAiResponse.ts#L22-L25', 'inference.put_voyageai.Request': 'inference/put_voyageai/PutVoyageAIRequest.ts#L30-L79', 'inference.put_voyageai.Response': 'inference/put_voyageai/PutVoyageAIResponse.ts#L22-L25', 
-'inference.put_watsonx.Request': 'inference/put_watsonx/PutWatsonxRequest.ts#L28-L68', +'inference.put_watsonx.Request': 'inference/put_watsonx/PutWatsonxRequest.ts#L28-L67', 'inference.put_watsonx.Response': 'inference/put_watsonx/PutWatsonxResponse.ts#L22-L25', 'inference.rerank.Request': 'inference/rerank/RerankRequest.ts#L25-L72', 'inference.rerank.Response': 'inference/rerank/RerankResponse.ts#L22-L25', @@ -2332,7 +2393,7 @@ 'ml.put_datafeed.Response': 'ml/put_datafeed/MlPutDatafeedResponse.ts#L31-L49', 'ml.put_filter.Request': 'ml/put_filter/MlPutFilterRequest.ts#L23-L58', 'ml.put_filter.Response': 'ml/put_filter/MlPutFilterResponse.ts#L22-L28', -'ml.put_job.Request': 'ml/put_job/MlPutJobRequest.ts#L30-L157', +'ml.put_job.Request': 'ml/put_job/MlPutJobRequest.ts#L30-L151', 'ml.put_job.Response': 'ml/put_job/MlPutJobResponse.ts#L29-L52', 'ml.put_trained_model.AggregateOutput': 'ml/put_trained_model/types.ts#L101-L106', 'ml.put_trained_model.Definition': 'ml/put_trained_model/types.ts#L24-L29', @@ -2374,7 +2435,7 @@ 'ml.stop_trained_model_deployment.Response': 'ml/stop_trained_model_deployment/MlStopTrainedModelDeploymentResponse.ts#L20-L22', 'ml.update_data_frame_analytics.Request': 'ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsRequest.ts#L24-L80', 'ml.update_data_frame_analytics.Response': 'ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsResponse.ts#L30-L45', -'ml.update_datafeed.Request': 'ml/update_datafeed/MlUpdateDatafeedRequest.ts#L31-L170', +'ml.update_datafeed.Request': 'ml/update_datafeed/MlUpdateDatafeedRequest.ts#L31-L164', 'ml.update_datafeed.Response': 'ml/update_datafeed/MlUpdateDatafeedResponse.ts#L31-L49', 'ml.update_filter.Request': 'ml/update_filter/MlUpdateFilterRequest.ts#L23-L60', 'ml.update_filter.Response': 'ml/update_filter/MlUpdateFilterResponse.ts#L22-L28', @@ -2813,7 +2874,7 @@ 'security.suggest_user_profiles.TotalUserProfiles': 'security/suggest_user_profiles/Response.ts#L24-L27', 
'security.update_api_key.Request': 'security/update_api_key/Request.ts#L26-L91', 'security.update_api_key.Response': 'security/update_api_key/Response.ts#L20-L28', -'security.update_cross_cluster_api_key.Request': 'security/update_cross_cluster_api_key/UpdateCrossClusterApiKeyRequest.ts#L25-L83', +'security.update_cross_cluster_api_key.Request': 'security/update_cross_cluster_api_key/UpdateCrossClusterApiKeyRequest.ts#L25-L85', 'security.update_cross_cluster_api_key.Response': 'security/update_cross_cluster_api_key/UpdateCrossClusterApiKeyResponse.ts#L20-L28', 'security.update_settings.Request': 'security/update_settings/SecurityUpdateSettingsRequest.ts#L24-L71', 'security.update_settings.Response': 'security/update_settings/SecurityUpdateSettingsResponse.ts#L20-L24', @@ -2842,6 +2903,7 @@ 'slm._types.Policy': 'slm/_types/SnapshotLifecycle.ts#L86-L92', 'slm._types.Retention': 'slm/_types/SnapshotLifecycle.ts#L94-L107', 'slm._types.SnapshotLifecycle': 'slm/_types/SnapshotLifecycle.ts#L38-L59', +'slm._types.SnapshotPolicyStats': 'slm/_types/SnapshotLifecycle.ts#L153-L159', 'slm._types.Statistics': 'slm/_types/SnapshotLifecycle.ts#L61-L84', 'slm.delete_lifecycle.Request': 'slm/delete_lifecycle/DeleteSnapshotLifecycleRequest.ts#L24-L58', 'slm.delete_lifecycle.Response': 'slm/delete_lifecycle/DeleteSnapshotLifecycleResponse.ts#L22-L25', @@ -2852,7 +2914,7 @@ 'slm.get_lifecycle.Request': 'slm/get_lifecycle/GetSnapshotLifecycleRequest.ts#L24-L64', 'slm.get_lifecycle.Response': 'slm/get_lifecycle/GetSnapshotLifecycleResponse.ts#L24-L27', 'slm.get_stats.Request': 'slm/get_stats/GetSnapshotLifecycleStatsRequest.ts#L23-L51', -'slm.get_stats.Response': 'slm/get_stats/GetSnapshotLifecycleStatsResponse.ts#L23-L36', +'slm.get_stats.Response': 'slm/get_stats/GetSnapshotLifecycleStatsResponse.ts#L24-L37', 'slm.get_status.Request': 'slm/get_status/GetSnapshotLifecycleManagementStatusRequest.ts#L23-L54', 'slm.get_status.Response': 
'slm/get_status/GetSnapshotLifecycleManagementStatusResponse.ts#L22-L24', 'slm.put_lifecycle.Request': 'slm/put_lifecycle/PutSnapshotLifecycleRequest.ts#L26-L89', @@ -2886,6 +2948,7 @@ 'snapshot._types.SnapshotShardFailure': 'snapshot/_types/SnapshotShardFailure.ts#L23-L30', 'snapshot._types.SnapshotShardsStatus': 'snapshot/_types/SnapshotShardsStatus.ts#L24-L27', 'snapshot._types.SnapshotSort': 'snapshot/_types/SnapshotInfo.ts#L73-L93', +'snapshot._types.SnapshotState': 'snapshot/_types/SnapshotState.ts#L20-L31', 'snapshot._types.SnapshotStats': 'snapshot/_types/SnapshotStats.ts#L23-L42', 'snapshot._types.SourceOnlyRepository': 'snapshot/_types/SnapshotRepository.ts#L104-L114', 'snapshot._types.SourceOnlyRepositorySettings': 'snapshot/_types/SnapshotRepository.ts#L414-L441', @@ -2903,7 +2966,7 @@ 'snapshot.delete.Response': 'snapshot/delete/SnapshotDeleteResponse.ts#L22-L25', 'snapshot.delete_repository.Request': 'snapshot/delete_repository/SnapshotDeleteRepositoryRequest.ts#L24-L64', 'snapshot.delete_repository.Response': 'snapshot/delete_repository/SnapshotDeleteRepositoryResponse.ts#L22-L25', -'snapshot.get.Request': 'snapshot/get/SnapshotGetRequest.ts#L27-L158', +'snapshot.get.Request': 'snapshot/get/SnapshotGetRequest.ts#L28-L166', 'snapshot.get.Response': 'snapshot/get/SnapshotGetResponse.ts#L25-L47', 'snapshot.get.SnapshotResponseItem': 'snapshot/get/SnapshotGetResponse.ts#L49-L53', 'snapshot.get_repository.Request': 'snapshot/get_repository/SnapshotGetRepositoryRequest.ts#L24-L68', @@ -3118,7 +3181,7 @@ 'watcher._types.WatchStatus': 'watcher/_types/Watch.ts#L49-L56', 'watcher._types.WebhookAction': 'watcher/_types/Actions.ts#L293-L293', 'watcher._types.WebhookResult': 'watcher/_types/Actions.ts#L295-L298', -'watcher.ack_watch.Request': 'watcher/ack_watch/WatcherAckWatchRequest.ts#L23-L61', +'watcher.ack_watch.Request': 'watcher/ack_watch/WatcherAckWatchRequest.ts#L23-L63', 'watcher.ack_watch.Response': 
'watcher/ack_watch/WatcherAckWatchResponse.ts#L22-L24', 'watcher.activate_watch.Request': 'watcher/activate_watch/WatcherActivateWatchRequest.ts#L23-L45', 'watcher.activate_watch.Response': 'watcher/activate_watch/WatcherActivateWatchResponse.ts#L22-L24', @@ -3126,7 +3189,7 @@ 'watcher.deactivate_watch.Response': 'watcher/deactivate_watch/DeactivateWatchResponse.ts#L22-L24', 'watcher.delete_watch.Request': 'watcher/delete_watch/DeleteWatchRequest.ts#L23-L50', 'watcher.delete_watch.Response': 'watcher/delete_watch/DeleteWatchResponse.ts#L22-L24', -'watcher.execute_watch.Request': 'watcher/execute_watch/WatcherExecuteWatchRequest.ts#L28-L105', +'watcher.execute_watch.Request': 'watcher/execute_watch/WatcherExecuteWatchRequest.ts#L28-L107', 'watcher.execute_watch.Response': 'watcher/execute_watch/WatcherExecuteWatchResponse.ts#L23-L34', 'watcher.execute_watch.WatchRecord': 'watcher/execute_watch/types.ts#L27-L39', 'watcher.get_settings.Request': 'watcher/get_settings/WatcherGetSettingsRequest.ts#L23-L45', @@ -3230,10 +3293,10 @@ if (hash.length > 1) { hash = hash.substring(1); } - window.location = "https://github.com/elastic/elasticsearch-specification/tree/d20fab727161e6e1c2c5941f0dfce83abaa92882/specification/" + (paths[hash] || ""); + window.location = "https://github.com/elastic/elasticsearch-specification/tree/09ebf27a057301e6f26581259d3c1d105074bdfd/specification/" + (paths[hash] || ""); - Please see the Elasticsearch API specification. + Please see the Elasticsearch API specification. 
diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/explain_lifecycle/LifecycleExplainManaged.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/explain_lifecycle/LifecycleExplainManaged.java index ba0a50871..23226118b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/explain_lifecycle/LifecycleExplainManaged.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/explain_lifecycle/LifecycleExplainManaged.java @@ -142,6 +142,8 @@ public class LifecycleExplainManaged implements LifecycleExplainVariant, JsonpSe @Nullable private final Time timeSinceIndexCreation; + private final boolean skip; + // --------------------------------------------------------------------------------------------- private LifecycleExplainManaged(Builder builder) { @@ -172,6 +174,7 @@ private LifecycleExplainManaged(Builder builder) { this.stepTimeMillis = builder.stepTimeMillis; this.phaseExecution = builder.phaseExecution; this.timeSinceIndexCreation = builder.timeSinceIndexCreation; + this.skip = ApiTypeHelper.requireNonNull(builder.skip, this, "skip", false); } @@ -392,6 +395,13 @@ public final Time timeSinceIndexCreation() { return this.timeSinceIndexCreation; } + /** + * Required - API name: {@code skip} + */ + public final boolean skip() { + return this.skip; + } + /** * Serialize this object to JSON. */ @@ -540,6 +550,8 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { this.timeSinceIndexCreation.serialize(generator, mapper); } + generator.writeKey("skip"); + generator.write(this.skip); } @@ -634,6 +646,8 @@ public static class Builder extends WithJsonObjectBuilderBase @Nullable private Time timeSinceIndexCreation; + private Boolean skip; + /** * API name: {@code action} */ @@ -888,6 +902,14 @@ public final Builder timeSinceIndexCreation(Function

    * API name: {@code expand_wildcards} */ @@ -252,9 +250,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -269,9 +265,7 @@ public final Builder expandWildcards(List list) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloseIndexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloseIndexRequest.java index ae7f21fc6..c9805a72b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloseIndexRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloseIndexRequest.java @@ -148,9 +148,7 @@ public final Boolean allowNoIndices() { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -259,9 +257,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -276,9 +272,7 @@ public final Builder expandWildcards(List list) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStream.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStream.java index f7a346be3..933350b6f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStream.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStream.java @@ -96,6 +96,8 @@ public class DataStream implements JsonpSerializable { private final boolean rolloverOnWrite; + private final IndexSettings settings; + private final HealthStatus status; @Nullable @@ -126,6 +128,7 @@ private DataStream(Builder builder) { this.name = ApiTypeHelper.requireNonNull(builder.name, this, "name"); this.replicated = builder.replicated; this.rolloverOnWrite = ApiTypeHelper.requireNonNull(builder.rolloverOnWrite, this, "rolloverOnWrite", false); + this.settings = ApiTypeHelper.requireNonNull(builder.settings, this, "settings"); this.status = ApiTypeHelper.requireNonNull(builder.status, this, "status"); this.system = builder.system; this.template = ApiTypeHelper.requireNonNull(builder.template, this, "template"); @@ -277,6 +280,16 @@ public final boolean rolloverOnWrite() { return this.rolloverOnWrite; } + /** + * Required - The settings specific to this data stream that will take + * precedence over the settings in the matching index template. + *

    + * API name: {@code settings} + */ + public final IndexSettings settings() { + return this.settings; + } + /** * Required - Health status of the data stream. This health status is based on * the state of the primary and replica shards of the stream’s backing indices. @@ -404,6 +417,9 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("rollover_on_write"); generator.write(this.rolloverOnWrite); + generator.writeKey("settings"); + this.settings.serialize(generator, mapper); + generator.writeKey("status"); this.status.serialize(generator, mapper); if (this.system != null) { @@ -468,6 +484,8 @@ public static class Builder extends WithJsonObjectBuilderBase implement private Boolean rolloverOnWrite; + private IndexSettings settings; + private HealthStatus status; @Nullable @@ -691,6 +709,27 @@ public final Builder rolloverOnWrite(boolean value) { return this; } + /** + * Required - The settings specific to this data stream that will take + * precedence over the settings in the matching index template. + *

    + * API name: {@code settings} + */ + public final Builder settings(IndexSettings value) { + this.settings = value; + return this; + } + + /** + * Required - The settings specific to this data stream that will take + * precedence over the settings in the matching index template. + *

    + * API name: {@code settings} + */ + public final Builder settings(Function> fn) { + return this.settings(fn.apply(new IndexSettings.Builder()).build()); + } + /** * Required - Health status of the data stream. This health status is based on * the state of the primary and replica shards of the stream’s backing indices. @@ -799,6 +838,7 @@ protected static void setupDataStreamDeserializer(ObjectDeserializerAPI + * specification + */ +@JsonpDeserializable +public class DataStreamFailureStore implements JsonpSerializable { + @Nullable + private final Boolean enabled; + + @Nullable + private final FailureStoreLifecycle lifecycle; + + // --------------------------------------------------------------------------------------------- + + private DataStreamFailureStore(Builder builder) { + + this.enabled = builder.enabled; + this.lifecycle = builder.lifecycle; + + } + + public static DataStreamFailureStore of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * If defined, it turns the failure store on/off + * (true/false) for this data stream. A data stream + * failure store that's disabled (enabled: false) will redirect no + * new failed indices to the failure store; however, it will not remove any + * existing data from the failure store. + *

    + * API name: {@code enabled} + */ + @Nullable + public final Boolean enabled() { + return this.enabled; + } + + /** + * If defined, it specifies the lifecycle configuration for the failure store of + * this data stream. + *

    + * API name: {@code lifecycle} + */ + @Nullable + public final FailureStoreLifecycle lifecycle() { + return this.lifecycle; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.enabled != null) { + generator.writeKey("enabled"); + generator.write(this.enabled); + + } + if (this.lifecycle != null) { + generator.writeKey("lifecycle"); + this.lifecycle.serialize(generator, mapper); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link DataStreamFailureStore}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + @Nullable + private Boolean enabled; + + @Nullable + private FailureStoreLifecycle lifecycle; + + /** + * If defined, it turns the failure store on/off + * (true/false) for this data stream. A data stream + * failure store that's disabled (enabled: false) will redirect no + * new failed indices to the failure store; however, it will not remove any + * existing data from the failure store. + *

    + * API name: {@code enabled} + */ + public final Builder enabled(@Nullable Boolean value) { + this.enabled = value; + return this; + } + + /** + * If defined, it specifies the lifecycle configuration for the failure store of + * this data stream. + *

    + * API name: {@code lifecycle} + */ + public final Builder lifecycle(@Nullable FailureStoreLifecycle value) { + this.lifecycle = value; + return this; + } + + /** + * If defined, it specifies the lifecycle configuration for the failure store of + * this data stream. + *

    + * API name: {@code lifecycle} + */ + public final Builder lifecycle( + Function> fn) { + return this.lifecycle(fn.apply(new FailureStoreLifecycle.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DataStreamFailureStore}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DataStreamFailureStore build() { + _checkSingleUse(); + + return new DataStreamFailureStore(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link DataStreamFailureStore} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DataStreamFailureStore::setupDataStreamFailureStoreDeserializer); + + protected static void setupDataStreamFailureStoreDeserializer( + ObjectDeserializer op) { + + op.add(Builder::enabled, JsonpDeserializer.booleanDeserializer(), "enabled"); + op.add(Builder::lifecycle, FailureStoreLifecycle._DESERIALIZER, "lifecycle"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamFailureStoreTemplate.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamFailureStoreTemplate.java new file mode 100644 index 000000000..448163a3c --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamFailureStoreTemplate.java @@ -0,0 +1,222 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices._types.DataStreamFailureStoreTemplate + +/** + * Template equivalent of DataStreamFailureStore that allows nullable values. 
+ * + * @see API + * specification + */ +@JsonpDeserializable +public class DataStreamFailureStoreTemplate implements JsonpSerializable { + @Nullable + private final Boolean enabled; + + @Nullable + private final FailureStoreLifecycleTemplate lifecycle; + + // --------------------------------------------------------------------------------------------- + + private DataStreamFailureStoreTemplate(Builder builder) { + + this.enabled = builder.enabled; + this.lifecycle = builder.lifecycle; + + } + + public static DataStreamFailureStoreTemplate of( + Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * If defined, it turns the failure store on/off + * (true/false) for this data stream. A data stream + * failure store that's disabled (enabled: false) will redirect no + * new failed indices to the failure store; however, it will not remove any + * existing data from the failure store. + *

    + * API name: {@code enabled} + */ + @Nullable + public final Boolean enabled() { + return this.enabled; + } + + /** + * If defined, it specifies the lifecycle configuration for the failure store of + * this data stream. + *

    + * API name: {@code lifecycle} + */ + @Nullable + public final FailureStoreLifecycleTemplate lifecycle() { + return this.lifecycle; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.enabled != null) { + generator.writeKey("enabled"); + generator.write(this.enabled); + + } + if (this.lifecycle != null) { + generator.writeKey("lifecycle"); + this.lifecycle.serialize(generator, mapper); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link DataStreamFailureStoreTemplate}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + @Nullable + private Boolean enabled; + + @Nullable + private FailureStoreLifecycleTemplate lifecycle; + + /** + * If defined, it turns the failure store on/off + * (true/false) for this data stream. A data stream + * failure store that's disabled (enabled: false) will redirect no + * new failed indices to the failure store; however, it will not remove any + * existing data from the failure store. + *

    + * API name: {@code enabled} + */ + public final Builder enabled(@Nullable Boolean value) { + this.enabled = value; + return this; + } + + /** + * If defined, it specifies the lifecycle configuration for the failure store of + * this data stream. + *

    + * API name: {@code lifecycle} + */ + public final Builder lifecycle(@Nullable FailureStoreLifecycleTemplate value) { + this.lifecycle = value; + return this; + } + + /** + * If defined, it specifies the lifecycle configuration for the failure store of + * this data stream. + *

    + * API name: {@code lifecycle} + */ + public final Builder lifecycle( + Function> fn) { + return this.lifecycle(fn.apply(new FailureStoreLifecycleTemplate.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DataStreamFailureStoreTemplate}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DataStreamFailureStoreTemplate build() { + _checkSingleUse(); + + return new DataStreamFailureStoreTemplate(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link DataStreamFailureStoreTemplate} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DataStreamFailureStoreTemplate::setupDataStreamFailureStoreTemplateDeserializer); + + protected static void setupDataStreamFailureStoreTemplateDeserializer( + ObjectDeserializer op) { + + op.add(Builder::enabled, JsonpDeserializer.booleanDeserializer(), "enabled"); + op.add(Builder::lifecycle, FailureStoreLifecycleTemplate._DESERIALIZER, "lifecycle"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamOptions.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamOptions.java new file mode 100644 index 000000000..0b28db42a --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamOptions.java @@ -0,0 +1,177 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices._types.DataStreamOptions + +/** + * Data stream options contain the configuration of data stream level features + * for a given data stream, for example, the failure store configuration. 
+ * + * @see API + * specification + */ +@JsonpDeserializable +public class DataStreamOptions implements JsonpSerializable { + @Nullable + private final DataStreamFailureStore failureStore; + + // --------------------------------------------------------------------------------------------- + + private DataStreamOptions(Builder builder) { + + this.failureStore = builder.failureStore; + + } + + public static DataStreamOptions of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * If defined, it specifies configuration for the failure store of this data + * stream. + *

    + * API name: {@code failure_store} + */ + @Nullable + public final DataStreamFailureStore failureStore() { + return this.failureStore; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.failureStore != null) { + generator.writeKey("failure_store"); + this.failureStore.serialize(generator, mapper); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link DataStreamOptions}. + */ + + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + @Nullable + private DataStreamFailureStore failureStore; + + /** + * If defined, it specifies configuration for the failure store of this data + * stream. + *

    + * API name: {@code failure_store} + */ + public final Builder failureStore(@Nullable DataStreamFailureStore value) { + this.failureStore = value; + return this; + } + + /** + * If defined, it specifies configuration for the failure store of this data + * stream. + *

    + * API name: {@code failure_store} + */ + public final Builder failureStore( + Function> fn) { + return this.failureStore(fn.apply(new DataStreamFailureStore.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DataStreamOptions}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DataStreamOptions build() { + _checkSingleUse(); + + return new DataStreamOptions(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link DataStreamOptions} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DataStreamOptions::setupDataStreamOptionsDeserializer); + + protected static void setupDataStreamOptionsDeserializer(ObjectDeserializer op) { + + op.add(Builder::failureStore, DataStreamFailureStore._DESERIALIZER, "failure_store"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamOptionsTemplate.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamOptionsTemplate.java new file mode 100644 index 000000000..000dccb7b --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamOptionsTemplate.java @@ -0,0 +1,171 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices._types.DataStreamOptionsTemplate + +/** + * Data stream options template contains the same information as + * DataStreamOptions but allows them to be set explicitly to null. 
+ * + * @see API + * specification + */ +@JsonpDeserializable +public class DataStreamOptionsTemplate implements JsonpSerializable { + @Nullable + private final DataStreamFailureStoreTemplate failureStore; + + // --------------------------------------------------------------------------------------------- + + private DataStreamOptionsTemplate(Builder builder) { + + this.failureStore = builder.failureStore; + + } + + public static DataStreamOptionsTemplate of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * API name: {@code failure_store} + */ + @Nullable + public final DataStreamFailureStoreTemplate failureStore() { + return this.failureStore; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.failureStore != null) { + generator.writeKey("failure_store"); + this.failureStore.serialize(generator, mapper); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link DataStreamOptionsTemplate}. 
+ */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + @Nullable + private DataStreamFailureStoreTemplate failureStore; + + /** + * API name: {@code failure_store} + */ + public final Builder failureStore(@Nullable DataStreamFailureStoreTemplate value) { + this.failureStore = value; + return this; + } + + /** + * API name: {@code failure_store} + */ + public final Builder failureStore( + Function> fn) { + return this.failureStore(fn.apply(new DataStreamFailureStoreTemplate.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DataStreamOptionsTemplate}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DataStreamOptionsTemplate build() { + _checkSingleUse(); + + return new DataStreamOptionsTemplate(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link DataStreamOptionsTemplate} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DataStreamOptionsTemplate::setupDataStreamOptionsTemplateDeserializer); + + protected static void setupDataStreamOptionsTemplateDeserializer( + ObjectDeserializer op) { + + op.add(Builder::failureStore, DataStreamFailureStoreTemplate._DESERIALIZER, "failure_store"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteDataStreamOptionsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteDataStreamOptionsRequest.java new file mode 100644 index 000000000..82c196c3e --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteDataStreamOptionsRequest.java @@ -0,0 +1,330 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.ExpandWildcard; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. 
+//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices.delete_data_stream_options.Request + +/** + * Delete data stream options. Removes the data stream options from a data + * stream. + * + * @see API + * specification + */ + +public class DeleteDataStreamOptionsRequest extends RequestBase { + private final List expandWildcards; + + @Nullable + private final Time masterTimeout; + + private final List name; + + @Nullable + private final Time timeout; + + // --------------------------------------------------------------------------------------------- + + private DeleteDataStreamOptionsRequest(Builder builder) { + + this.expandWildcards = ApiTypeHelper.unmodifiable(builder.expandWildcards); + this.masterTimeout = builder.masterTimeout; + this.name = ApiTypeHelper.unmodifiableRequired(builder.name, this, "name"); + this.timeout = builder.timeout; + + } + + public static DeleteDataStreamOptionsRequest of( + Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Whether wildcard expressions should get expanded to open or closed indices + * (default: open) + *

    + * API name: {@code expand_wildcards} + */ + public final List expandWildcards() { + return this.expandWildcards; + } + + /** + * Specify timeout for connection to master + *

    + * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + + /** + * Required - A comma-separated list of data streams of which the data stream + * options will be deleted; use * to get all data streams + *

    + * API name: {@code name} + */ + public final List name() { + return this.name; + } + + /** + * Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link DeleteDataStreamOptionsRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + @Nullable + private List expandWildcards; + + @Nullable + private Time masterTimeout; + + private List name; + + @Nullable + private Time timeout; + + /** + * Whether wildcard expressions should get expanded to open or closed indices + * (default: open) + *

    + * API name: {@code expand_wildcards} + *

    + * Adds all elements of list to expandWildcards. + */ + public final Builder expandWildcards(List list) { + this.expandWildcards = _listAddAll(this.expandWildcards, list); + return this; + } + + /** + * Whether wildcard expressions should get expanded to open or closed indices + * (default: open) + *

    + * API name: {@code expand_wildcards} + *

    + * Adds one or more values to expandWildcards. + */ + public final Builder expandWildcards(ExpandWildcard value, ExpandWildcard... values) { + this.expandWildcards = _listAdd(this.expandWildcards, value, values); + return this; + } + + /** + * Specify timeout for connection to master + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Specify timeout for connection to master + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Required - A comma-separated list of data streams of which the data stream + * options will be deleted; use * to get all data streams + *

    + * API name: {@code name} + *

    + * Adds all elements of list to name. + */ + public final Builder name(List list) { + this.name = _listAddAll(this.name, list); + return this; + } + + /** + * Required - A comma-separated list of data streams of which the data stream + * options will be deleted; use * to get all data streams + *

    + * API name: {@code name} + *

    + * Adds one or more values to name. + */ + public final Builder name(String value, String... values) { + this.name = _listAdd(this.name, value, values); + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DeleteDataStreamOptionsRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DeleteDataStreamOptionsRequest build() { + _checkSingleUse(); + + return new DeleteDataStreamOptionsRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code indices.delete_data_stream_options}". + */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/indices.delete_data_stream_options", + + // Request method + request -> { + return "DELETE"; + + }, + + // Request path + request -> { + final int _name = 1 << 0; + + int propsSet = 0; + + propsSet |= _name; + + if (propsSet == (_name)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_data_stream"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.name.stream().map(v -> v).collect(Collectors.joining(",")), buf); + buf.append("/_options"); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _name = 1 << 0; + + int propsSet = 0; + + propsSet |= _name; + + if (propsSet == (_name)) { + params.put("name", request.name.stream().map(v -> v).collect(Collectors.joining(","))); + } + return params; + }, + + // Request parameters + request -> { + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (ApiTypeHelper.isDefined(request.expandWildcards)) { + params.put("expand_wildcards", + request.expandWildcards.stream().map(v -> v.jsonValue()).collect(Collectors.joining(","))); + } + if (request.timeout != null) { + 
params.put("timeout", request.timeout._toJsonString()); + } + return params; + + }, SimpleEndpoint.emptyMap(), false, DeleteDataStreamOptionsResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteDataStreamOptionsResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteDataStreamOptionsResponse.java new file mode 100644 index 000000000..050346699 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteDataStreamOptionsResponse.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.AcknowledgedResponseBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.util.Objects; +import java.util.function.Function; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. 
MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices.delete_data_stream_options.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class DeleteDataStreamOptionsResponse extends AcknowledgedResponseBase { + // --------------------------------------------------------------------------------------------- + + private DeleteDataStreamOptionsResponse(Builder builder) { + super(builder); + + } + + public static DeleteDataStreamOptionsResponse of( + Function> fn) { + return fn.apply(new Builder()).build(); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link DeleteDataStreamOptionsResponse}. + */ + + public static class Builder extends AcknowledgedResponseBase.AbstractBuilder + implements + ObjectBuilder { + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DeleteDataStreamOptionsResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. 
+ */ + public DeleteDataStreamOptionsResponse build() { + _checkSingleUse(); + + return new DeleteDataStreamOptionsResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link DeleteDataStreamOptionsResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DeleteDataStreamOptionsResponse::setupDeleteDataStreamOptionsResponseDeserializer); + + protected static void setupDeleteDataStreamOptionsResponseDeserializer( + ObjectDeserializer op) { + AcknowledgedResponseBase.setupAcknowledgedResponseBaseDeserializer(op); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteIndexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteIndexRequest.java index e32305f49..9a339ff77 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteIndexRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteIndexRequest.java @@ -123,9 +123,7 @@ public final Boolean allowNoIndices() { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -222,9 +220,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -239,9 +235,7 @@ public final Builder expandWildcards(List list) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java index 7b2f6ab01..62298174d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java @@ -896,6 +896,42 @@ public final CompletableFuture deleteDataStream( return deleteDataStream(fn.apply(new DeleteDataStreamRequest.Builder()).build()); } + // ----- Endpoint: indices.delete_data_stream_options + + /** + * Delete data stream options. Removes the data stream options from a data + * stream. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture deleteDataStreamOptions( + DeleteDataStreamOptionsRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) DeleteDataStreamOptionsRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Delete data stream options. Removes the data stream options from a data + * stream. + * + * @param fn + * a function that initializes a builder to create the + * {@link DeleteDataStreamOptionsRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture deleteDataStreamOptions( + Function> fn) { + return deleteDataStreamOptions(fn.apply(new DeleteDataStreamOptionsRequest.Builder()).build()); + } + // ----- Endpoint: indices.delete_index_template /** @@ -1877,6 +1913,81 @@ public CompletableFuture getDataStream() { GetDataStreamRequest._ENDPOINT, this.transportOptions); } + // ----- Endpoint: indices.get_data_stream_options + + /** + * Get data stream options. + *

    + * Get the data stream options configuration of one or more data streams. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture getDataStreamOptions(GetDataStreamOptionsRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) GetDataStreamOptionsRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Get data stream options. + *

    + * Get the data stream options configuration of one or more data streams. + * + * @param fn + * a function that initializes a builder to create the + * {@link GetDataStreamOptionsRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture getDataStreamOptions( + Function> fn) { + return getDataStreamOptions(fn.apply(new GetDataStreamOptionsRequest.Builder()).build()); + } + + // ----- Endpoint: indices.get_data_stream_settings + + /** + * Get data stream settings. + *

    + * Get setting information for one or more data streams. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture getDataStreamSettings( + GetDataStreamSettingsRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) GetDataStreamSettingsRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Get data stream settings. + *

    + * Get setting information for one or more data streams. + * + * @param fn + * a function that initializes a builder to create the + * {@link GetDataStreamSettingsRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture getDataStreamSettings( + Function> fn) { + return getDataStreamSettings(fn.apply(new GetDataStreamSettingsRequest.Builder()).build()); + } + // ----- Endpoint: indices.get_field_mapping /** @@ -2532,6 +2643,89 @@ public final CompletableFuture putDataLifecycle( return putDataLifecycle(fn.apply(new PutDataLifecycleRequest.Builder()).build()); } + // ----- Endpoint: indices.put_data_stream_options + + /** + * Update data stream options. Update the data stream options of the specified + * data streams. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture putDataStreamOptions(PutDataStreamOptionsRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) PutDataStreamOptionsRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Update data stream options. Update the data stream options of the specified + * data streams. + * + * @param fn + * a function that initializes a builder to create the + * {@link PutDataStreamOptionsRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture putDataStreamOptions( + Function> fn) { + return putDataStreamOptions(fn.apply(new PutDataStreamOptionsRequest.Builder()).build()); + } + + // ----- Endpoint: indices.put_data_stream_settings + + /** + * Update data stream settings. + *

    + * This API can be used to override settings on specific data streams. These + * overrides will take precedence over what is specified in the template that + * the data stream matches. To prevent your data stream from getting into an + * invalid state, only certain settings are allowed. If possible, the setting + * change is applied to all backing indices. Otherwise, it will be applied when + * the data stream is next rolled over. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture putDataStreamSettings( + PutDataStreamSettingsRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) PutDataStreamSettingsRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Update data stream settings. + *

    + * This API can be used to override settings on specific data streams. These + * overrides will take precedence over what is specified in the template that + * the data stream matches. To prevent your data stream from getting into an + * invalid state, only certain settings are allowed. If possible, the setting + * change is applied to all backing indices. Otherwise, it will be applied when + * the data stream is next rolled over. + * + * @param fn + * a function that initializes a builder to create the + * {@link PutDataStreamSettingsRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture putDataStreamSettings( + Function> fn) { + return putDataStreamSettings(fn.apply(new PutDataStreamSettingsRequest.Builder()).build()); + } + // ----- Endpoint: indices.put_index_template /** @@ -2649,40 +2843,21 @@ public final CompletableFuture putIndexTemplate( /** * Update field mappings. Add new fields to an existing data stream or index. - * You can also use this API to change the search settings of existing fields - * and add new properties to existing object fields. For data streams, these - * changes are applied to all backing indices by default. - *

    - * Add multi-fields to an existing field - *

    - * Multi-fields let you index the same field in different ways. You can use this - * API to update the fields mapping parameter and enable multi-fields for an - * existing field. WARNING: If an index (or data stream) contains documents when - * you add a multi-field, those documents will not have values for the new - * multi-field. You can populate the new multi-field with the update by query - * API. - *

    - * Change supported mapping parameters for an existing field - *

    - * The documentation for each mapping parameter indicates whether you can update - * it for an existing field using this API. For example, you can use the update - * mapping API to update the ignore_above parameter. - *

    - * Change the mapping of an existing field - *

    - * Except for supported mapping parameters, you can't change the mapping or - * field type of an existing field. Changing an existing field could invalidate - * data that's already indexed. - *

    - * If you need to change the mapping of a field in a data stream's backing - * indices, refer to documentation about modifying data streams. If you need to - * change the mapping of a field in other indices, create a new index with the - * correct mapping and reindex your data into that index. - *

    - * Rename a field + * You can use the update mapping API to: + *

      + *
    • Add a new field to an existing index
    • + *
    • Update mappings for multiple indices in a single request
    • + *
    • Add new properties to an object field
    • + *
    • Enable multi-fields for an existing field
    • + *
    • Update supported mapping parameters
    • + *
    • Change a field's mapping using reindexing
    • + *
    • Rename a field using a field alias
    • + *
    *

    - * Renaming a field would invalidate data already indexed under the old field - * name. Instead, add an alias field to create an alternate field name. + * Learn how to use the update mapping API with practical examples in the + * Update + * mapping API examples guide. * * @see Documentation @@ -2698,40 +2873,21 @@ public CompletableFuture putMapping(PutMappingRequest reques /** * Update field mappings. Add new fields to an existing data stream or index. - * You can also use this API to change the search settings of existing fields - * and add new properties to existing object fields. For data streams, these - * changes are applied to all backing indices by default. - *

    - * Add multi-fields to an existing field - *

    - * Multi-fields let you index the same field in different ways. You can use this - * API to update the fields mapping parameter and enable multi-fields for an - * existing field. WARNING: If an index (or data stream) contains documents when - * you add a multi-field, those documents will not have values for the new - * multi-field. You can populate the new multi-field with the update by query - * API. - *

    - * Change supported mapping parameters for an existing field - *

    - * The documentation for each mapping parameter indicates whether you can update - * it for an existing field using this API. For example, you can use the update - * mapping API to update the ignore_above parameter. - *

    - * Change the mapping of an existing field - *

    - * Except for supported mapping parameters, you can't change the mapping or - * field type of an existing field. Changing an existing field could invalidate - * data that's already indexed. - *

    - * If you need to change the mapping of a field in a data stream's backing - * indices, refer to documentation about modifying data streams. If you need to - * change the mapping of a field in other indices, create a new index with the - * correct mapping and reindex your data into that index. - *

    - * Rename a field + * You can use the update mapping API to: + *

    *

    - * Renaming a field would invalidate data already indexed under the old field - * name. Instead, add an alias field to create an alternate field name. + * Learn how to use the update mapping API with practical examples in the + * Update + * mapping API examples guide. * * @param fn * a function that initializes a builder to create the @@ -2754,10 +2910,58 @@ public final CompletableFuture putMapping( *

    * To revert a setting to the default value, use a null value. The list of * per-index settings that can be updated dynamically on live indices can be - * found in index module documentation. To preserve existing settings from being - * updated, set the preserve_existing parameter to + * found in index settings documentation. To preserve existing settings from + * being updated, set the preserve_existing parameter to * true. *

    + * For performance optimization during bulk indexing, you can disable the + * refresh interval. Refer to disable + * refresh interval for an example. There are multiple valid ways to + * represent index settings in the request body. You can specify only the + * setting, for example: + * + *

    +	 * {
    +	 *   "number_of_replicas": 1
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can use an index setting object: + * + *

    +	 * {
    +	 *   "index": {
    +	 *     "number_of_replicas": 1
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can use dot annotation: + * + *

    +	 * {
    +	 *   "index.number_of_replicas": 1
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can embed any of the aforementioned options in a settings + * object. For example: + * + *

    +	 * {
    +	 *   "settings": {
    +	 *     "index": {
    +	 *       "number_of_replicas": 1
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    * NOTE: You can only define new analyzers on closed indices. To add an * analyzer, you must close the index, define the analyzer, and reopen the * index. You cannot close the write index of a data stream. To update the @@ -2768,6 +2972,9 @@ public final CompletableFuture putMapping( * after the rollover. However, it does not affect the data stream's backing * indices or their existing data. To change the analyzer for existing backing * indices, you must create a new data stream and reindex your data into it. + * Refer to updating + * analyzers on existing indices for step-by-step examples. * * @see Documentation @@ -2787,10 +2994,58 @@ public CompletableFuture putSettings(PutIndicesSetti *

    * To revert a setting to the default value, use a null value. The list of * per-index settings that can be updated dynamically on live indices can be - * found in index module documentation. To preserve existing settings from being - * updated, set the preserve_existing parameter to + * found in index settings documentation. To preserve existing settings from + * being updated, set the preserve_existing parameter to * true. *

    + * For performance optimization during bulk indexing, you can disable the + * refresh interval. Refer to disable + * refresh interval for an example. There are multiple valid ways to + * represent index settings in the request body. You can specify only the + * setting, for example: + * + *

    +	 * {
    +	 *   "number_of_replicas": 1
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can use an index setting object: + * + *

    +	 * {
    +	 *   "index": {
    +	 *     "number_of_replicas": 1
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can use dot annotation: + * + *

    +	 * {
    +	 *   "index.number_of_replicas": 1
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can embed any of the aforementioned options in a settings + * object. For example: + * + *

    +	 * {
    +	 *   "settings": {
    +	 *     "index": {
    +	 *       "number_of_replicas": 1
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    * NOTE: You can only define new analyzers on closed indices. To add an * analyzer, you must close the index, define the analyzer, and reopen the * index. You cannot close the write index of a data stream. To update the @@ -2801,6 +3056,9 @@ public CompletableFuture putSettings(PutIndicesSetti * after the rollover. However, it does not affect the data stream's backing * indices or their existing data. To change the analyzer for existing backing * indices, you must create a new data stream and reindex your data into it. + * Refer to updating + * analyzers on existing indices for step-by-step examples. * * @param fn * a function that initializes a builder to create the @@ -2821,10 +3079,58 @@ public final CompletableFuture putSettings( *

    * To revert a setting to the default value, use a null value. The list of * per-index settings that can be updated dynamically on live indices can be - * found in index module documentation. To preserve existing settings from being - * updated, set the preserve_existing parameter to + * found in index settings documentation. To preserve existing settings from + * being updated, set the preserve_existing parameter to * true. *

    + * For performance optimization during bulk indexing, you can disable the + * refresh interval. Refer to disable + * refresh interval for an example. There are multiple valid ways to + * represent index settings in the request body. You can specify only the + * setting, for example: + * + *

    +	 * {
    +	 *   "number_of_replicas": 1
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can use an index setting object: + * + *

    +	 * {
    +	 *   "index": {
    +	 *     "number_of_replicas": 1
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can use dot annotation: + * + *

    +	 * {
    +	 *   "index.number_of_replicas": 1
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can embed any of the aforementioned options in a settings + * object. For example: + * + *

    +	 * {
    +	 *   "settings": {
    +	 *     "index": {
    +	 *       "number_of_replicas": 1
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    * NOTE: You can only define new analyzers on closed indices. To add an * analyzer, you must close the index, define the analyzer, and reopen the * index. You cannot close the write index of a data stream. To update the @@ -2835,6 +3141,9 @@ public final CompletableFuture putSettings( * after the rollover. However, it does not affect the data stream's backing * indices or their existing data. To change the analyzer for existing backing * indices, you must create a new data stream and reindex your data into it. + * Refer to updating + * analyzers on existing indices for step-by-step examples. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java index 750387959..094a3933d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java @@ -905,6 +905,43 @@ public final DeleteDataStreamResponse deleteDataStream( return deleteDataStream(fn.apply(new DeleteDataStreamRequest.Builder()).build()); } + // ----- Endpoint: indices.delete_data_stream_options + + /** + * Delete data stream options. Removes the data stream options from a data + * stream. + * + * @see Documentation + * on elastic.co + */ + + public DeleteDataStreamOptionsResponse deleteDataStreamOptions(DeleteDataStreamOptionsRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) DeleteDataStreamOptionsRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Delete data stream options. Removes the data stream options from a data + * stream. 
+ * + * @param fn + * a function that initializes a builder to create the + * {@link DeleteDataStreamOptionsRequest} + * @see Documentation + * on elastic.co + */ + + public final DeleteDataStreamOptionsResponse deleteDataStreamOptions( + Function> fn) + throws IOException, ElasticsearchException { + return deleteDataStreamOptions(fn.apply(new DeleteDataStreamOptionsRequest.Builder()).build()); + } + // ----- Endpoint: indices.delete_index_template /** @@ -1901,6 +1938,84 @@ public GetDataStreamResponse getDataStream() throws IOException, ElasticsearchEx this.transportOptions); } + // ----- Endpoint: indices.get_data_stream_options + + /** + * Get data stream options. + *

    + * Get the data stream options configuration of one or more data streams. + * + * @see Documentation + * on elastic.co + */ + + public GetDataStreamOptionsResponse getDataStreamOptions(GetDataStreamOptionsRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) GetDataStreamOptionsRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Get data stream options. + *

    + * Get the data stream options configuration of one or more data streams. + * + * @param fn + * a function that initializes a builder to create the + * {@link GetDataStreamOptionsRequest} + * @see Documentation + * on elastic.co + */ + + public final GetDataStreamOptionsResponse getDataStreamOptions( + Function> fn) + throws IOException, ElasticsearchException { + return getDataStreamOptions(fn.apply(new GetDataStreamOptionsRequest.Builder()).build()); + } + + // ----- Endpoint: indices.get_data_stream_settings + + /** + * Get data stream settings. + *

    + * Get setting information for one or more data streams. + * + * @see Documentation + * on elastic.co + */ + + public GetDataStreamSettingsResponse getDataStreamSettings(GetDataStreamSettingsRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) GetDataStreamSettingsRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Get data stream settings. + *

    + * Get setting information for one or more data streams. + * + * @param fn + * a function that initializes a builder to create the + * {@link GetDataStreamSettingsRequest} + * @see Documentation + * on elastic.co + */ + + public final GetDataStreamSettingsResponse getDataStreamSettings( + Function> fn) + throws IOException, ElasticsearchException { + return getDataStreamSettings(fn.apply(new GetDataStreamSettingsRequest.Builder()).build()); + } + // ----- Endpoint: indices.get_field_mapping /** @@ -2575,6 +2690,92 @@ public final PutDataLifecycleResponse putDataLifecycle( return putDataLifecycle(fn.apply(new PutDataLifecycleRequest.Builder()).build()); } + // ----- Endpoint: indices.put_data_stream_options + + /** + * Update data stream options. Update the data stream options of the specified + * data streams. + * + * @see Documentation + * on elastic.co + */ + + public PutDataStreamOptionsResponse putDataStreamOptions(PutDataStreamOptionsRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) PutDataStreamOptionsRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Update data stream options. Update the data stream options of the specified + * data streams. + * + * @param fn + * a function that initializes a builder to create the + * {@link PutDataStreamOptionsRequest} + * @see Documentation + * on elastic.co + */ + + public final PutDataStreamOptionsResponse putDataStreamOptions( + Function> fn) + throws IOException, ElasticsearchException { + return putDataStreamOptions(fn.apply(new PutDataStreamOptionsRequest.Builder()).build()); + } + + // ----- Endpoint: indices.put_data_stream_settings + + /** + * Update data stream settings. + *

    + * This API can be used to override settings on specific data streams. These + * overrides will take precedence over what is specified in the template that + * the data stream matches. To prevent your data stream from getting into an + * invalid state, only certain settings are allowed. If possible, the setting + * change is applied to all backing indices. Otherwise, it will be applied when + * the data stream is next rolled over. + * + * @see Documentation + * on elastic.co + */ + + public PutDataStreamSettingsResponse putDataStreamSettings(PutDataStreamSettingsRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) PutDataStreamSettingsRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Update data stream settings. + *

    + * This API can be used to override settings on specific data streams. These + * overrides will take precedence over what is specified in the template that + * the data stream matches. To prevent your data stream from getting into an + * invalid state, only certain settings are allowed. If possible, the setting + * change is applied to all backing indices. Otherwise, it will be applied when + * the data stream is next rolled over. + * + * @param fn + * a function that initializes a builder to create the + * {@link PutDataStreamSettingsRequest} + * @see Documentation + * on elastic.co + */ + + public final PutDataStreamSettingsResponse putDataStreamSettings( + Function> fn) + throws IOException, ElasticsearchException { + return putDataStreamSettings(fn.apply(new PutDataStreamSettingsRequest.Builder()).build()); + } + // ----- Endpoint: indices.put_index_template /** @@ -2694,40 +2895,21 @@ public final PutIndexTemplateResponse putIndexTemplate( /** * Update field mappings. Add new fields to an existing data stream or index. - * You can also use this API to change the search settings of existing fields - * and add new properties to existing object fields. For data streams, these - * changes are applied to all backing indices by default. - *

    - * Add multi-fields to an existing field - *

    - * Multi-fields let you index the same field in different ways. You can use this - * API to update the fields mapping parameter and enable multi-fields for an - * existing field. WARNING: If an index (or data stream) contains documents when - * you add a multi-field, those documents will not have values for the new - * multi-field. You can populate the new multi-field with the update by query - * API. - *

    - * Change supported mapping parameters for an existing field - *

    - * The documentation for each mapping parameter indicates whether you can update - * it for an existing field using this API. For example, you can use the update - * mapping API to update the ignore_above parameter. - *

    - * Change the mapping of an existing field - *

    - * Except for supported mapping parameters, you can't change the mapping or - * field type of an existing field. Changing an existing field could invalidate - * data that's already indexed. - *

    - * If you need to change the mapping of a field in a data stream's backing - * indices, refer to documentation about modifying data streams. If you need to - * change the mapping of a field in other indices, create a new index with the - * correct mapping and reindex your data into that index. - *

    - * Rename a field + * You can use the update mapping API to: + *

      + *
    • Add a new field to an existing index
    • + *
    • Update mappings for multiple indices in a single request
    • + *
    • Add new properties to an object field
    • + *
    • Enable multi-fields for an existing field
    • + *
    • Update supported mapping parameters
    • + *
    • Change a field's mapping using reindexing
    • + *
    • Rename a field using a field alias
    • + *
    *

    - * Renaming a field would invalidate data already indexed under the old field - * name. Instead, add an alias field to create an alternate field name. + * Learn how to use the update mapping API with practical examples in the + * Update + * mapping API examples guide. * * @see Documentation @@ -2743,40 +2925,21 @@ public PutMappingResponse putMapping(PutMappingRequest request) throws IOExcepti /** * Update field mappings. Add new fields to an existing data stream or index. - * You can also use this API to change the search settings of existing fields - * and add new properties to existing object fields. For data streams, these - * changes are applied to all backing indices by default. - *

    - * Add multi-fields to an existing field - *

    - * Multi-fields let you index the same field in different ways. You can use this - * API to update the fields mapping parameter and enable multi-fields for an - * existing field. WARNING: If an index (or data stream) contains documents when - * you add a multi-field, those documents will not have values for the new - * multi-field. You can populate the new multi-field with the update by query - * API. - *

    - * Change supported mapping parameters for an existing field - *

    - * The documentation for each mapping parameter indicates whether you can update - * it for an existing field using this API. For example, you can use the update - * mapping API to update the ignore_above parameter. - *

    - * Change the mapping of an existing field - *

    - * Except for supported mapping parameters, you can't change the mapping or - * field type of an existing field. Changing an existing field could invalidate - * data that's already indexed. - *

    - * If you need to change the mapping of a field in a data stream's backing - * indices, refer to documentation about modifying data streams. If you need to - * change the mapping of a field in other indices, create a new index with the - * correct mapping and reindex your data into that index. - *

    - * Rename a field + * You can use the update mapping API to: + *

    *

    - * Renaming a field would invalidate data already indexed under the old field - * name. Instead, add an alias field to create an alternate field name. + * Learn how to use the update mapping API with practical examples in the + * Update + * mapping API examples guide. * * @param fn * a function that initializes a builder to create the @@ -2799,10 +2962,58 @@ public final PutMappingResponse putMapping(Function * To revert a setting to the default value, use a null value. The list of * per-index settings that can be updated dynamically on live indices can be - * found in index module documentation. To preserve existing settings from being - * updated, set the preserve_existing parameter to + * found in index settings documentation. To preserve existing settings from + * being updated, set the preserve_existing parameter to * true. *

    + * For performance optimization during bulk indexing, you can disable the + * refresh interval. Refer to disable + * refresh interval for an example. There are multiple valid ways to + * represent index settings in the request body. You can specify only the + * setting, for example: + * + *

    +	 * {
    +	 *   "number_of_replicas": 1
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can use an index setting object: + * + *

    +	 * {
    +	 *   "index": {
    +	 *     "number_of_replicas": 1
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can use dot annotation: + * + *

    +	 * {
    +	 *   "index.number_of_replicas": 1
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can embed any of the aforementioned options in a settings + * object. For example: + * + *

    +	 * {
    +	 *   "settings": {
    +	 *     "index": {
    +	 *       "number_of_replicas": 1
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    * NOTE: You can only define new analyzers on closed indices. To add an * analyzer, you must close the index, define the analyzer, and reopen the * index. You cannot close the write index of a data stream. To update the @@ -2813,6 +3024,9 @@ public final PutMappingResponse putMapping(Functionupdating + * analyzers on existing indices for step-by-step examples. * * @see Documentation @@ -2833,10 +3047,58 @@ public PutIndicesSettingsResponse putSettings(PutIndicesSettingsRequest request) *

    * To revert a setting to the default value, use a null value. The list of * per-index settings that can be updated dynamically on live indices can be - * found in index module documentation. To preserve existing settings from being - * updated, set the preserve_existing parameter to + * found in index settings documentation. To preserve existing settings from + * being updated, set the preserve_existing parameter to * true. *

    + * For performance optimization during bulk indexing, you can disable the + * refresh interval. Refer to disable + * refresh interval for an example. There are multiple valid ways to + * represent index settings in the request body. You can specify only the + * setting, for example: + * + *

    +	 * {
    +	 *   "number_of_replicas": 1
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can use an index setting object: + * + *

    +	 * {
    +	 *   "index": {
    +	 *     "number_of_replicas": 1
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can use dot annotation: + * + *

    +	 * {
    +	 *   "index.number_of_replicas": 1
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can embed any of the aforementioned options in a settings + * object. For example: + * + *

    +	 * {
    +	 *   "settings": {
    +	 *     "index": {
    +	 *       "number_of_replicas": 1
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    * NOTE: You can only define new analyzers on closed indices. To add an * analyzer, you must close the index, define the analyzer, and reopen the * index. You cannot close the write index of a data stream. To update the @@ -2847,6 +3109,9 @@ public PutIndicesSettingsResponse putSettings(PutIndicesSettingsRequest request) * after the rollover. However, it does not affect the data stream's backing * indices or their existing data. To change the analyzer for existing backing * indices, you must create a new data stream and reindex your data into it. + * Refer to updating + * analyzers on existing indices for step-by-step examples. * * @param fn * a function that initializes a builder to create the @@ -2868,10 +3133,58 @@ public final PutIndicesSettingsResponse putSettings( *

    * To revert a setting to the default value, use a null value. The list of * per-index settings that can be updated dynamically on live indices can be - * found in index module documentation. To preserve existing settings from being - * updated, set the preserve_existing parameter to + * found in index settings documentation. To preserve existing settings from + * being updated, set the preserve_existing parameter to * true. *

    + * For performance optimization during bulk indexing, you can disable the + * refresh interval. Refer to disable + * refresh interval for an example. There are multiple valid ways to + * represent index settings in the request body. You can specify only the + * setting, for example: + * + *

    +	 * {
    +	 *   "number_of_replicas": 1
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can use an index setting object: + * + *

    +	 * {
    +	 *   "index": {
    +	 *     "number_of_replicas": 1
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can use dot annotation: + * + *

    +	 * {
    +	 *   "index.number_of_replicas": 1
    +	 * }
    +	 * 
    +	 * 
    + *

    + * Or you can embed any of the aforementioned options in a settings + * object. For example: + * + *

    +	 * {
    +	 *   "settings": {
    +	 *     "index": {
    +	 *       "number_of_replicas": 1
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    * NOTE: You can only define new analyzers on closed indices. To add an * analyzer, you must close the index, define the analyzer, and reopen the * index. You cannot close the write index of a data stream. To update the @@ -2882,6 +3195,9 @@ public final PutIndicesSettingsResponse putSettings( * after the rollover. However, it does not affect the data stream's backing * indices or their existing data. To change the analyzer for existing backing * indices, you must create a new data stream and reindex your data into it. + * Refer to updating + * analyzers on existing indices for step-by-step examples. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsAliasRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsAliasRequest.java index 789464b0d..0080deacc 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsAliasRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsAliasRequest.java @@ -120,9 +120,7 @@ public final Boolean allowNoIndices() { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -216,9 +214,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -233,9 +229,7 @@ public final Builder expandWildcards(List list) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsRequest.java index 57b1ed678..3c547ae1f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsRequest.java @@ -123,9 +123,7 @@ public final Boolean allowNoIndices() { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -229,9 +227,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -246,9 +242,7 @@ public final Builder expandWildcards(List list) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FailureStoreLifecycle.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FailureStoreLifecycle.java new file mode 100644 index 000000000..c0f8b3de0 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FailureStoreLifecycle.java @@ -0,0 +1,225 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices._types.FailureStoreLifecycle + +/** + * The failure store lifecycle configures the data stream lifecycle + * configuration for failure indices. 
+ * + * @see API + * specification + */ +@JsonpDeserializable +public class FailureStoreLifecycle implements JsonpSerializable { + @Nullable + private final Time dataRetention; + + @Nullable + private final Boolean enabled; + + // --------------------------------------------------------------------------------------------- + + private FailureStoreLifecycle(Builder builder) { + + this.dataRetention = builder.dataRetention; + this.enabled = builder.enabled; + + } + + public static FailureStoreLifecycle of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * If defined, every document added to this data stream will be stored at least + * for this time frame. Any time after this duration the document could be + * deleted. When empty, every document in this data stream will be stored + * indefinitely. + *

    + * API name: {@code data_retention} + */ + @Nullable + public final Time dataRetention() { + return this.dataRetention; + } + + /** + * If defined, it turns data stream lifecycle on/off + * (true/false) for this data stream. A data stream + * lifecycle that's disabled (enabled: false) will have no effect + * on the data stream. + *

    + * API name: {@code enabled} + */ + @Nullable + public final Boolean enabled() { + return this.enabled; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.dataRetention != null) { + generator.writeKey("data_retention"); + this.dataRetention.serialize(generator, mapper); + + } + if (this.enabled != null) { + generator.writeKey("enabled"); + generator.write(this.enabled); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link FailureStoreLifecycle}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + @Nullable + private Time dataRetention; + + @Nullable + private Boolean enabled; + + /** + * If defined, every document added to this data stream will be stored at least + * for this time frame. Any time after this duration the document could be + * deleted. When empty, every document in this data stream will be stored + * indefinitely. + *

    + * API name: {@code data_retention} + */ + public final Builder dataRetention(@Nullable Time value) { + this.dataRetention = value; + return this; + } + + /** + * If defined, every document added to this data stream will be stored at least + * for this time frame. Any time after this duration the document could be + * deleted. When empty, every document in this data stream will be stored + * indefinitely. + *

    + * API name: {@code data_retention} + */ + public final Builder dataRetention(Function> fn) { + return this.dataRetention(fn.apply(new Time.Builder()).build()); + } + + /** + * If defined, it turns data stream lifecycle on/off + * (true/false) for this data stream. A data stream + * lifecycle that's disabled (enabled: false) will have no effect + * on the data stream. + *

    + * API name: {@code enabled} + */ + public final Builder enabled(@Nullable Boolean value) { + this.enabled = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link FailureStoreLifecycle}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public FailureStoreLifecycle build() { + _checkSingleUse(); + + return new FailureStoreLifecycle(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link FailureStoreLifecycle} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, FailureStoreLifecycle::setupFailureStoreLifecycleDeserializer); + + protected static void setupFailureStoreLifecycleDeserializer(ObjectDeserializer op) { + + op.add(Builder::dataRetention, Time._DESERIALIZER, "data_retention"); + op.add(Builder::enabled, JsonpDeserializer.booleanDeserializer(), "enabled"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FailureStoreLifecycleTemplate.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FailureStoreLifecycleTemplate.java new file mode 100644 index 000000000..62e79d783 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FailureStoreLifecycleTemplate.java @@ -0,0 +1,225 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices._types.FailureStoreLifecycleTemplate + +/** + * Template equivalent of FailureStoreLifecycle that allows nullable values. 
+ * + * @see API + * specification + */ +@JsonpDeserializable +public class FailureStoreLifecycleTemplate implements JsonpSerializable { + @Nullable + private final Time dataRetention; + + @Nullable + private final Boolean enabled; + + // --------------------------------------------------------------------------------------------- + + private FailureStoreLifecycleTemplate(Builder builder) { + + this.dataRetention = builder.dataRetention; + this.enabled = builder.enabled; + + } + + public static FailureStoreLifecycleTemplate of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * If defined, every document added to this data stream will be stored at least + * for this time frame. Any time after this duration the document could be + * deleted. When empty, every document in this data stream will be stored + * indefinitely. + *

    + * API name: {@code data_retention} + */ + @Nullable + public final Time dataRetention() { + return this.dataRetention; + } + + /** + * If defined, it turns data stream lifecycle on/off + * (true/false) for this data stream. A data stream + * lifecycle that's disabled (enabled: false) will have no effect + * on the data stream. + *

    + * API name: {@code enabled} + */ + @Nullable + public final Boolean enabled() { + return this.enabled; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.dataRetention != null) { + generator.writeKey("data_retention"); + this.dataRetention.serialize(generator, mapper); + + } + if (this.enabled != null) { + generator.writeKey("enabled"); + generator.write(this.enabled); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link FailureStoreLifecycleTemplate}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + @Nullable + private Time dataRetention; + + @Nullable + private Boolean enabled; + + /** + * If defined, every document added to this data stream will be stored at least + * for this time frame. Any time after this duration the document could be + * deleted. When empty, every document in this data stream will be stored + * indefinitely. + *

    + * API name: {@code data_retention} + */ + public final Builder dataRetention(@Nullable Time value) { + this.dataRetention = value; + return this; + } + + /** + * If defined, every document added to this data stream will be stored at least + * for this time frame. Any time after this duration the document could be + * deleted. When empty, every document in this data stream will be stored + * indefinitely. + *

    + * API name: {@code data_retention} + */ + public final Builder dataRetention(Function> fn) { + return this.dataRetention(fn.apply(new Time.Builder()).build()); + } + + /** + * If defined, it turns data stream lifecycle on/off + * (true/false) for this data stream. A data stream + * lifecycle that's disabled (enabled: false) will have no effect + * on the data stream. + *

    + * API name: {@code enabled} + */ + public final Builder enabled(@Nullable Boolean value) { + this.enabled = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link FailureStoreLifecycleTemplate}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public FailureStoreLifecycleTemplate build() { + _checkSingleUse(); + + return new FailureStoreLifecycleTemplate(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link FailureStoreLifecycleTemplate} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, FailureStoreLifecycleTemplate::setupFailureStoreLifecycleTemplateDeserializer); + + protected static void setupFailureStoreLifecycleTemplateDeserializer( + ObjectDeserializer op) { + + op.add(Builder::dataRetention, Time._DESERIALIZER, "data_retention"); + op.add(Builder::enabled, JsonpDeserializer.booleanDeserializer(), "enabled"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FlushRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FlushRequest.java index f23846239..5d1fae1ad 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FlushRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FlushRequest.java @@ -135,9 +135,7 @@ public final Boolean allowNoIndices() { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -232,9 +230,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -249,9 +245,7 @@ public final Builder expandWildcards(List list) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetAliasRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetAliasRequest.java index 4e1d9c815..c80e603fe 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetAliasRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetAliasRequest.java @@ -117,9 +117,7 @@ public final Boolean allowNoIndices() { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -213,9 +211,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -230,9 +226,7 @@ public final Builder expandWildcards(List list) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleRequest.java index 5760e21a6..738bc9cbf 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleRequest.java @@ -97,9 +97,7 @@ public static GetDataLifecycleRequest of(Functionopen,hidden. Valid values are: - * all, open, closed, - * hidden, none. + * comma-separated values, such as open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -161,9 +159,7 @@ public static class Builder extends RequestBase.AbstractBuilder /** * Type of data stream that wildcard patterns can match. Supports - * comma-separated values, such as open,hidden. Valid values are: - * all, open, closed, - * hidden, none. + * comma-separated values, such as open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -176,9 +172,7 @@ public final Builder expandWildcards(List list) { /** * Type of data stream that wildcard patterns can match. Supports - * comma-separated values, such as open,hidden. Valid values are: - * all, open, closed, - * hidden, none. + * comma-separated values, such as open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamOptionsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamOptionsRequest.java new file mode 100644 index 000000000..2b3b61eb5 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamOptionsRequest.java @@ -0,0 +1,297 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.ExpandWildcard; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices.get_data_stream_options.Request + +/** + * Get data stream options. + *

    + * Get the data stream options configuration of one or more data streams. + * + * @see API + * specification + */ + +public class GetDataStreamOptionsRequest extends RequestBase { + private final List expandWildcards; + + @Nullable + private final Time masterTimeout; + + private final List name; + + // --------------------------------------------------------------------------------------------- + + private GetDataStreamOptionsRequest(Builder builder) { + + this.expandWildcards = ApiTypeHelper.unmodifiable(builder.expandWildcards); + this.masterTimeout = builder.masterTimeout; + this.name = ApiTypeHelper.unmodifiableRequired(builder.name, this, "name"); + + } + + public static GetDataStreamOptionsRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Type of data stream that wildcard patterns can match. Supports + * comma-separated values, such as open,hidden. + *

    + * API name: {@code expand_wildcards} + */ + public final List expandWildcards() { + return this.expandWildcards; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + + /** + * Required - Comma-separated list of data streams to limit the request. + * Supports wildcards (*). To target all data streams, omit this + * parameter or use * or _all. + *

    + * API name: {@code name} + */ + public final List name() { + return this.name; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link GetDataStreamOptionsRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + @Nullable + private List expandWildcards; + + @Nullable + private Time masterTimeout; + + private List name; + + /** + * Type of data stream that wildcard patterns can match. Supports + * comma-separated values, such as open,hidden. + *

    + * API name: {@code expand_wildcards} + *

    + * Adds all elements of list to expandWildcards. + */ + public final Builder expandWildcards(List list) { + this.expandWildcards = _listAddAll(this.expandWildcards, list); + return this; + } + + /** + * Type of data stream that wildcard patterns can match. Supports + * comma-separated values, such as open,hidden. + *

    + * API name: {@code expand_wildcards} + *

    + * Adds one or more values to expandWildcards. + */ + public final Builder expandWildcards(ExpandWildcard value, ExpandWildcard... values) { + this.expandWildcards = _listAdd(this.expandWildcards, value, values); + return this; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Required - Comma-separated list of data streams to limit the request. + * Supports wildcards (*). To target all data streams, omit this + * parameter or use * or _all. + *

    + * API name: {@code name} + *

    + * Adds all elements of list to name. + */ + public final Builder name(List list) { + this.name = _listAddAll(this.name, list); + return this; + } + + /** + * Required - Comma-separated list of data streams to limit the request. + * Supports wildcards (*). To target all data streams, omit this + * parameter or use * or _all. + *

    + * API name: {@code name} + *

    + * Adds one or more values to name. + */ + public final Builder name(String value, String... values) { + this.name = _listAdd(this.name, value, values); + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link GetDataStreamOptionsRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public GetDataStreamOptionsRequest build() { + _checkSingleUse(); + + return new GetDataStreamOptionsRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code indices.get_data_stream_options}". + */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/indices.get_data_stream_options", + + // Request method + request -> { + return "GET"; + + }, + + // Request path + request -> { + final int _name = 1 << 0; + + int propsSet = 0; + + propsSet |= _name; + + if (propsSet == (_name)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_data_stream"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.name.stream().map(v -> v).collect(Collectors.joining(",")), buf); + buf.append("/_options"); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _name = 1 << 0; + + int propsSet = 0; + + propsSet |= _name; + + if (propsSet == (_name)) { + params.put("name", request.name.stream().map(v -> v).collect(Collectors.joining(","))); + } + return params; + }, + + // Request parameters + request -> { + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (ApiTypeHelper.isDefined(request.expandWildcards)) { + params.put("expand_wildcards", + request.expandWildcards.stream().map(v -> v.jsonValue()).collect(Collectors.joining(","))); + } + return params; + + }, 
SimpleEndpoint.emptyMap(), false, GetDataStreamOptionsResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamOptionsResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamOptionsResponse.java new file mode 100644 index 000000000..0d19afebe --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamOptionsResponse.java @@ -0,0 +1,189 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch.indices.get_data_stream_options.DataStreamWithOptions; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: indices.get_data_stream_options.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class GetDataStreamOptionsResponse implements JsonpSerializable { + private final List dataStreams; + + // --------------------------------------------------------------------------------------------- + + private GetDataStreamOptionsResponse(Builder builder) { + + this.dataStreams = ApiTypeHelper.unmodifiableRequired(builder.dataStreams, this, "dataStreams"); + + } + + public static GetDataStreamOptionsResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: {@code data_streams} + */ + public final List dataStreams() { + return this.dataStreams; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (ApiTypeHelper.isDefined(this.dataStreams)) { + generator.writeKey("data_streams"); + generator.writeStartArray(); + for (DataStreamWithOptions item0 : this.dataStreams) { + item0.serialize(generator, mapper); + + } + generator.writeEnd(); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link GetDataStreamOptionsResponse}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private List dataStreams; + + /** + * Required - API name: {@code data_streams} + *

    + * Adds all elements of list to dataStreams. + */ + public final Builder dataStreams(List list) { + this.dataStreams = _listAddAll(this.dataStreams, list); + return this; + } + + /** + * Required - API name: {@code data_streams} + *

    + * Adds one or more values to dataStreams. + */ + public final Builder dataStreams(DataStreamWithOptions value, DataStreamWithOptions... values) { + this.dataStreams = _listAdd(this.dataStreams, value, values); + return this; + } + + /** + * Required - API name: {@code data_streams} + *

    + * Adds a value to dataStreams using a builder lambda. + */ + public final Builder dataStreams( + Function> fn) { + return dataStreams(fn.apply(new DataStreamWithOptions.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link GetDataStreamOptionsResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public GetDataStreamOptionsResponse build() { + _checkSingleUse(); + + return new GetDataStreamOptionsResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link GetDataStreamOptionsResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, GetDataStreamOptionsResponse::setupGetDataStreamOptionsResponseDeserializer); + + protected static void setupGetDataStreamOptionsResponseDeserializer( + ObjectDeserializer op) { + + op.add(Builder::dataStreams, JsonpDeserializer.arrayDeserializer(DataStreamWithOptions._DESERIALIZER), + "data_streams"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamSettingsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamSettingsRequest.java new file mode 100644 index 000000000..9c1320fa1 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamSettingsRequest.java @@ -0,0 +1,247 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: indices.get_data_stream_settings.Request + +/** + * Get data stream settings. + *

    + * Get setting information for one or more data streams. + * + * @see API + * specification + */ + +public class GetDataStreamSettingsRequest extends RequestBase { + @Nullable + private final Time masterTimeout; + + private final List name; + + // --------------------------------------------------------------------------------------------- + + private GetDataStreamSettingsRequest(Builder builder) { + + this.masterTimeout = builder.masterTimeout; + this.name = ApiTypeHelper.unmodifiableRequired(builder.name, this, "name"); + + } + + public static GetDataStreamSettingsRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * The period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + + /** + * Required - A comma-separated list of data streams or data stream patterns. + * Supports wildcards (*). + *

    + * API name: {@code name} + */ + public final List name() { + return this.name; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link GetDataStreamSettingsRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + @Nullable + private Time masterTimeout; + + private List name; + + /** + * The period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * The period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Required - A comma-separated list of data streams or data stream patterns. + * Supports wildcards (*). + *

    + * API name: {@code name} + *

    + * Adds all elements of list to name. + */ + public final Builder name(List list) { + this.name = _listAddAll(this.name, list); + return this; + } + + /** + * Required - A comma-separated list of data streams or data stream patterns. + * Supports wildcards (*). + *

    + * API name: {@code name} + *

    + * Adds one or more values to name. + */ + public final Builder name(String value, String... values) { + this.name = _listAdd(this.name, value, values); + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link GetDataStreamSettingsRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public GetDataStreamSettingsRequest build() { + _checkSingleUse(); + + return new GetDataStreamSettingsRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code indices.get_data_stream_settings}". + */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/indices.get_data_stream_settings", + + // Request method + request -> { + return "GET"; + + }, + + // Request path + request -> { + final int _name = 1 << 0; + + int propsSet = 0; + + propsSet |= _name; + + if (propsSet == (_name)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_data_stream"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.name.stream().map(v -> v).collect(Collectors.joining(",")), buf); + buf.append("/_settings"); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _name = 1 << 0; + + int propsSet = 0; + + propsSet |= _name; + + if (propsSet == (_name)) { + params.put("name", request.name.stream().map(v -> v).collect(Collectors.joining(","))); + } + return params; + }, + + // Request parameters + request -> { + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + return params; + + }, SimpleEndpoint.emptyMap(), false, GetDataStreamSettingsResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamSettingsResponse.java 
b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamSettingsResponse.java new file mode 100644 index 000000000..66b25ef89 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataStreamSettingsResponse.java @@ -0,0 +1,188 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch.indices.get_data_stream_settings.DataStreamSettings; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: indices.get_data_stream_settings.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class GetDataStreamSettingsResponse implements JsonpSerializable { + private final List dataStreams; + + // --------------------------------------------------------------------------------------------- + + private GetDataStreamSettingsResponse(Builder builder) { + + this.dataStreams = ApiTypeHelper.unmodifiableRequired(builder.dataStreams, this, "dataStreams"); + + } + + public static GetDataStreamSettingsResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: {@code data_streams} + */ + public final List dataStreams() { + return this.dataStreams; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (ApiTypeHelper.isDefined(this.dataStreams)) { + generator.writeKey("data_streams"); + generator.writeStartArray(); + for (DataStreamSettings item0 : this.dataStreams) { + item0.serialize(generator, mapper); + + } + generator.writeEnd(); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link GetDataStreamSettingsResponse}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private List dataStreams; + + /** + * Required - API name: {@code data_streams} + *

    + * Adds all elements of list to dataStreams. + */ + public final Builder dataStreams(List list) { + this.dataStreams = _listAddAll(this.dataStreams, list); + return this; + } + + /** + * Required - API name: {@code data_streams} + *

    + * Adds one or more values to dataStreams. + */ + public final Builder dataStreams(DataStreamSettings value, DataStreamSettings... values) { + this.dataStreams = _listAdd(this.dataStreams, value, values); + return this; + } + + /** + * Required - API name: {@code data_streams} + *

    + * Adds a value to dataStreams using a builder lambda. + */ + public final Builder dataStreams(Function> fn) { + return dataStreams(fn.apply(new DataStreamSettings.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link GetDataStreamSettingsResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public GetDataStreamSettingsResponse build() { + _checkSingleUse(); + + return new GetDataStreamSettingsResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link GetDataStreamSettingsResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, GetDataStreamSettingsResponse::setupGetDataStreamSettingsResponseDeserializer); + + protected static void setupGetDataStreamSettingsResponseDeserializer( + ObjectDeserializer op) { + + op.add(Builder::dataStreams, JsonpDeserializer.arrayDeserializer(DataStreamSettings._DESERIALIZER), + "data_streams"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetFieldMappingRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetFieldMappingRequest.java index e4b9c5ac1..72d6592f4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetFieldMappingRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetFieldMappingRequest.java @@ -121,9 +121,7 @@ public final Boolean allowNoIndices() { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -216,9 +214,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -233,9 +229,7 @@ public final Builder expandWildcards(List list) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMappingRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMappingRequest.java index e9f08c307..052e64203 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMappingRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMappingRequest.java @@ -118,9 +118,7 @@ public final Boolean allowNoIndices() { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} */ @@ -220,9 +218,7 @@ public final Builder allowNoIndices(@Nullable Boolean value) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    @@ -237,9 +233,7 @@ public final Builder expandWildcards(List list) { * Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as - * open,hidden. Valid values are: all, - * open, closed, hidden, - * none. + * open,hidden. *

    * API name: {@code expand_wildcards} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexSettings.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexSettings.java index 3e57ed978..f04619044 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexSettings.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexSettings.java @@ -530,7 +530,8 @@ public final Integer maxRefreshListeners() { /** * Settings to define analyzers, tokenizers, token filters and character - * filters. + * filters. Refer to the linked documentation for step-by-step examples of + * updating analyzers on existing indices. *

    * API name: {@code analyze} */ @@ -1587,7 +1588,8 @@ public final Builder maxRefreshListeners(@Nullable Integer value) { /** * Settings to define analyzers, tokenizers, token filters and character - * filters. + * filters. Refer to the linked documentation for step-by-step examples of + * updating analyzers on existing indices. *

    * API name: {@code analyze} */ @@ -1598,7 +1600,8 @@ public final Builder analyze(@Nullable SettingsAnalyze value) { /** * Settings to define analyzers, tokenizers, token filters and character - * filters. + * filters. Refer to the linked documentation for step-by-step examples of + * updating analyzers on existing indices. *

    * API name: {@code analyze} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexTemplateSummary.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexTemplateSummary.java index cccd8686e..4ff847cba 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexTemplateSummary.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndexTemplateSummary.java @@ -73,6 +73,9 @@ public class IndexTemplateSummary implements JsonpSerializable { @Nullable private final DataStreamLifecycleWithRollover lifecycle; + @Nullable + private final DataStreamOptionsTemplate dataStreamOptions; + // --------------------------------------------------------------------------------------------- private IndexTemplateSummary(Builder builder) { @@ -81,6 +84,7 @@ private IndexTemplateSummary(Builder builder) { this.mappings = builder.mappings; this.settings = builder.settings; this.lifecycle = builder.lifecycle; + this.dataStreamOptions = builder.dataStreamOptions; } @@ -129,6 +133,14 @@ public final DataStreamLifecycleWithRollover lifecycle() { return this.lifecycle; } + /** + * API name: {@code data_stream_options} + */ + @Nullable + public final DataStreamOptionsTemplate dataStreamOptions() { + return this.dataStreamOptions; + } + /** * Serialize this object to JSON. */ @@ -166,6 +178,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { this.lifecycle.serialize(generator, mapper); } + if (this.dataStreamOptions != null) { + generator.writeKey("data_stream_options"); + this.dataStreamOptions.serialize(generator, mapper); + + } } @@ -195,6 +212,9 @@ public static class Builder extends WithJsonObjectBuilderBase @Nullable private DataStreamLifecycleWithRollover lifecycle; + @Nullable + private DataStreamOptionsTemplate dataStreamOptions; + /** * Aliases to add. If the index template includes a data_stream * object, these are data stream aliases. 
Otherwise, these are index aliases. @@ -295,6 +315,22 @@ public final Builder lifecycle( return this.lifecycle(fn.apply(new DataStreamLifecycleWithRollover.Builder()).build()); } + /** + * API name: {@code data_stream_options} + */ + public final Builder dataStreamOptions(@Nullable DataStreamOptionsTemplate value) { + this.dataStreamOptions = value; + return this; + } + + /** + * API name: {@code data_stream_options} + */ + public final Builder dataStreamOptions( + Function> fn) { + return this.dataStreamOptions(fn.apply(new DataStreamOptionsTemplate.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -327,6 +363,7 @@ protected static void setupIndexTemplateSummaryDeserializer(ObjectDeserializer